system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void Split(int * xi, bool * xb, size_t idxi, size_t idxb, size_t N, float threshold) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { xb[(idxb)*N+i] = (((float)xi[(idxi-1)*N+i]) == threshold); } return; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5SplitPiPbmmmf .globl _Z5SplitPiPbmmmf .p2align 8 .type _Z5SplitPiPbmmmf,@function _Z5SplitPiPbmmmf: s_clause 0x1 s_load_b32 s6, s[0:1], 0x3c s_load_b64 s[2:3], s[0:1], 0x20 s_add_u32 s4, s0, 48 s_addc_u32 s5, s1, 0 s_mov_b32 s7, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s6, s6, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s6, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[2:3], v[1:2] s_cbranch_execz .LBB0_3 s_clause 0x1 s_load_b256 s[16:23], s[0:1], 0x0 s_load_b32 s1, s[0:1], 0x28 s_load_b32 s0, s[4:5], 0x0 s_waitcnt lgkmcnt(0) s_add_u32 s4, s20, -1 s_addc_u32 s5, s21, -1 s_mul_i32 s8, s4, s3 s_mul_hi_u32 s9, s4, s2 s_mul_i32 s5, s5, s2 s_add_i32 s8, s9, s8 s_mul_i32 s4, s4, s2 s_add_i32 s5, s8, s5 s_mul_i32 s7, s2, s23 s_lshl_b64 s[4:5], s[4:5], 2 s_mul_hi_u32 s8, s2, s22 s_add_u32 s4, s16, s4 s_mul_i32 s9, s3, s22 s_addc_u32 s5, s17, s5 s_add_i32 s7, s8, s7 s_mul_i32 s8, s2, s22 s_add_i32 s9, s7, s9 s_add_u32 s7, s18, s8 s_addc_u32 s8, s19, s9 s_add_i32 s15, s15, s0 s_mov_b32 s9, 0 v_mad_u64_u32 v[3:4], null, s15, s6, v[0:1] s_mul_i32 s6, s0, s6 .p2align 6 .LBB0_2: v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo global_load_b32 v0, v[4:5], off v_ashrrev_i32_e32 v4, 31, v3 v_add_co_u32 v5, vcc_lo, s7, v1 v_add_co_ci_u32_e32 v6, vcc_lo, s8, v2, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_cmp_le_u64_e32 vcc_lo, s[2:3], v[3:4] v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 v_add_nc_u32_e32 v3, s6, v3 s_or_b32 s9, vcc_lo, s9 s_waitcnt vmcnt(0) v_cvt_f32_i32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_f32_e64 s0, s1, v0 v_cndmask_b32_e64 v0, 0, 1, s0 global_store_b8 v[5:6], v0, off s_and_not1_b32 exec_lo, exec_lo, s9 
s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5SplitPiPbmmmf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 24 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5SplitPiPbmmmf, .Lfunc_end0-_Z5SplitPiPbmmmf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - 
.address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5SplitPiPbmmmf .private_segment_fixed_size: 0 .sgpr_count: 26 .sgpr_spill_count: 0 .symbol: _Z5SplitPiPbmmmf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void Split(int * xi, bool * xb, size_t idxi, size_t idxb, size_t N, float threshold) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { xb[(idxb)*N+i] = (((float)xi[(idxi-1)*N+i]) == threshold); } return; }
.text .file "Split.hip" .globl _Z20__device_stub__SplitPiPbmmmf # -- Begin function _Z20__device_stub__SplitPiPbmmmf .p2align 4, 0x90 .type _Z20__device_stub__SplitPiPbmmmf,@function _Z20__device_stub__SplitPiPbmmmf: # @_Z20__device_stub__SplitPiPbmmmf .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movss %xmm0, 4(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z5SplitPiPbmmmf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z20__device_stub__SplitPiPbmmmf, .Lfunc_end0-_Z20__device_stub__SplitPiPbmmmf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5SplitPiPbmmmf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 
# -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z5SplitPiPbmmmf,@object # @_Z5SplitPiPbmmmf .section .rodata,"a",@progbits .globl _Z5SplitPiPbmmmf .p2align 3, 0x0 _Z5SplitPiPbmmmf: .quad _Z20__device_stub__SplitPiPbmmmf .size _Z5SplitPiPbmmmf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5SplitPiPbmmmf" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__SplitPiPbmmmf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5SplitPiPbmmmf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z5SplitPiPbmmmf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fe40003f06070 */ /*0050*/ SHF.R.S32.HI R2, RZ, 0x1f, R0 ; /* 0x0000001fff027819 */ /* 0x000fc80000011400 */ /*0060*/ ISETP.GE.U32.AND.EX P0, PT, R2, c[0x0][0x184], PT, P0 ; /* 0x0000610002007a0c */ /* 0x000fda0003f06100 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe200078e00ff */ /*0090*/ MOV R5, R2 ; /* 0x0000000200057202 */ /* 0x000fe20000000f00 */ /*00a0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff087624 */ /* 0x000fe200078e00ff */ /*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */ /* 0x000fe200078e0000 */ /*00d0*/ IADD3 R9, P0, -R9, c[0x0][0x170], RZ ; /* 0x00005c0009097a10 */ /* 0x000fc80007f1e1ff */ /*00e0*/ IADD3.X R8, R8, -0x1, RZ, P0, !PT ; /* 0xffffffff08087810 */ /* 0x000fca00007fe4ff */ /*00f0*/ IMAD R2, R8, c[0x0][0x180], RZ ; /* 0x0000600008027a24 */ /* 0x000fc800078e02ff */ /*0100*/ IMAD R7, R9.reuse, c[0x0][0x184], R2 ; /* 0x0000610009077a24 */ /* 0x040fe400078e0202 */ /*0110*/ IMAD.WIDE.U32 R2, R9, c[0x0][0x180], R4 ; /* 0x0000600009027a25 */ /* 0x000fc800078e0004 */ /*0120*/ IMAD.IADD R3, R3, 0x1, R7 ; /* 0x0000000103037824 */ /* 0x000fe200078e0207 */ /*0130*/ LEA R6, P0, R2, c[0x0][0x160], 0x2 ; /* 0x0000580002067a11 */ /* 0x000fc800078010ff */ /*0140*/ 
LEA.HI.X R7, R2, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590002077a11 */ /* 0x000fca00000f1403 */ /*0150*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x0000a2000c1e1900 */ /*0160*/ MOV R11, c[0x0][0x178] ; /* 0x00005e00000b7a02 */ /* 0x000fe20000000f00 */ /*0170*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0d7624 */ /* 0x000fc800078e00ff */ /*0180*/ IMAD.WIDE.U32 R4, R11, c[0x0][0x180], R4 ; /* 0x000060000b047a25 */ /* 0x000fc800078e0004 */ /*0190*/ IMAD R2, R11, c[0x0][0x184], RZ ; /* 0x000061000b027a24 */ /* 0x000fe400078e02ff */ /*01a0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff077624 */ /* 0x001fe400078e00ff */ /*01b0*/ IMAD R11, R13, c[0x0][0x17c], R2 ; /* 0x00005f000d0b7a24 */ /* 0x000fe200078e0202 */ /*01c0*/ IADD3 R2, P1, R4, c[0x0][0x168], RZ ; /* 0x00005a0004027a10 */ /* 0x000fe20007f3e0ff */ /*01d0*/ IMAD R4, R7, c[0x0][0xc], R0 ; /* 0x0000030007047a24 */ /* 0x000fca00078e0200 */ /*01e0*/ MOV R0, R4 ; /* 0x0000000400007202 */ /* 0x000fe20000000f00 */ /*01f0*/ I2F R3, R6 ; /* 0x0000000600037306 */ /* 0x004e240000201400 */ /*0200*/ FSETP.NEU.AND P0, PT, R3, c[0x0][0x188], PT ; /* 0x0000620003007a0b */ /* 0x001fe40003f0d000 */ /*0210*/ IADD3.X R3, R5, c[0x0][0x16c], R11, P1, !PT ; /* 0x00005b0005037a10 */ /* 0x000fe40000ffe40b */ /*0220*/ SEL R5, RZ, 0x1, P0 ; /* 0x00000001ff057807 */ /* 0x000fe40000000000 */ /*0230*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x180], PT ; /* 0x0000600004007a0c */ /* 0x000fc60003f06070 */ /*0240*/ STG.E.U8 [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0001e4000c101104 */ /*0250*/ SHF.R.S32.HI R5, RZ, 0x1f, R4 ; /* 0x0000001fff057819 */ /* 0x001fc80000011404 */ /*0260*/ ISETP.GE.U32.AND.EX P0, PT, R5, c[0x0][0x184], PT, P0 ; /* 0x0000610005007a0c */ /* 0x000fda0003f06100 */ /*0270*/ @!P0 BRA 0xf0 ; /* 0xfffffe7000008947 */ /* 0x000fea000383ffff */ /*0280*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0290*/ BRA 0x290; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02a0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5SplitPiPbmmmf .globl _Z5SplitPiPbmmmf .p2align 8 .type _Z5SplitPiPbmmmf,@function _Z5SplitPiPbmmmf: s_clause 0x1 s_load_b32 s6, s[0:1], 0x3c s_load_b64 s[2:3], s[0:1], 0x20 s_add_u32 s4, s0, 48 s_addc_u32 s5, s1, 0 s_mov_b32 s7, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s6, s6, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s6, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[2:3], v[1:2] s_cbranch_execz .LBB0_3 s_clause 0x1 s_load_b256 s[16:23], s[0:1], 0x0 s_load_b32 s1, s[0:1], 0x28 s_load_b32 s0, s[4:5], 0x0 s_waitcnt lgkmcnt(0) s_add_u32 s4, s20, -1 s_addc_u32 s5, s21, -1 s_mul_i32 s8, s4, s3 s_mul_hi_u32 s9, s4, s2 s_mul_i32 s5, s5, s2 s_add_i32 s8, s9, s8 s_mul_i32 s4, s4, s2 s_add_i32 s5, s8, s5 s_mul_i32 s7, s2, s23 s_lshl_b64 s[4:5], s[4:5], 2 s_mul_hi_u32 s8, s2, s22 s_add_u32 s4, s16, s4 s_mul_i32 s9, s3, s22 s_addc_u32 s5, s17, s5 s_add_i32 s7, s8, s7 s_mul_i32 s8, s2, s22 s_add_i32 s9, s7, s9 s_add_u32 s7, s18, s8 s_addc_u32 s8, s19, s9 s_add_i32 s15, s15, s0 s_mov_b32 s9, 0 v_mad_u64_u32 v[3:4], null, s15, s6, v[0:1] s_mul_i32 s6, s0, s6 .p2align 6 .LBB0_2: v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo global_load_b32 v0, v[4:5], off v_ashrrev_i32_e32 v4, 31, v3 v_add_co_u32 v5, vcc_lo, s7, v1 v_add_co_ci_u32_e32 v6, vcc_lo, s8, v2, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_cmp_le_u64_e32 vcc_lo, s[2:3], v[3:4] v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 v_add_nc_u32_e32 v3, s6, v3 s_or_b32 s9, vcc_lo, s9 s_waitcnt vmcnt(0) v_cvt_f32_i32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_f32_e64 s0, s1, v0 v_cndmask_b32_e64 v0, 0, 1, s0 global_store_b8 v[5:6], v0, off s_and_not1_b32 exec_lo, exec_lo, s9 
s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5SplitPiPbmmmf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 24 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5SplitPiPbmmmf, .Lfunc_end0-_Z5SplitPiPbmmmf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - 
.address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5SplitPiPbmmmf .private_segment_fixed_size: 0 .sgpr_count: 26 .sgpr_spill_count: 0 .symbol: _Z5SplitPiPbmmmf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0016c68a_00000000-6_Split.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf .type _Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf, @function _Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movss %xmm0, 4(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) leaq 4(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z5SplitPiPbmmmf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf, .-_Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf .globl _Z5SplitPiPbmmmf .type _Z5SplitPiPbmmmf, @function _Z5SplitPiPbmmmf: .LFB2052: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z5SplitPiPbmmmfPiPbmmmf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z5SplitPiPbmmmf, .-_Z5SplitPiPbmmmf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5SplitPiPbmmmf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z5SplitPiPbmmmf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Split.hip" .globl _Z20__device_stub__SplitPiPbmmmf # -- Begin function _Z20__device_stub__SplitPiPbmmmf .p2align 4, 0x90 .type _Z20__device_stub__SplitPiPbmmmf,@function _Z20__device_stub__SplitPiPbmmmf: # @_Z20__device_stub__SplitPiPbmmmf .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movss %xmm0, 4(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z5SplitPiPbmmmf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z20__device_stub__SplitPiPbmmmf, .Lfunc_end0-_Z20__device_stub__SplitPiPbmmmf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5SplitPiPbmmmf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 
# -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z5SplitPiPbmmmf,@object # @_Z5SplitPiPbmmmf .section .rodata,"a",@progbits .globl _Z5SplitPiPbmmmf .p2align 3, 0x0 _Z5SplitPiPbmmmf: .quad _Z20__device_stub__SplitPiPbmmmf .size _Z5SplitPiPbmmmf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5SplitPiPbmmmf" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__SplitPiPbmmmf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5SplitPiPbmmmf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <curand.h>
#include <curand_kernel.h>

extern "C" {

// Seeds one RNG state per thread. Expects a 1-D thread block; thread i
// initializes state[i] with sequence number i and offset 0.
__global__ void init(unsigned long long int* seed, curandState* state) {
    int id = threadIdx.x;
    curand_init(*seed, id, 0, &state[id]);
}

// Unnormalized target density: p = exp(-|x * pars[2]|^pars[1]).
__device__ void pi(const float& x, float* pars, float& p) {
    p = expf(-powf(fabsf(x * pars[2]), pars[1]));
}

// Accumulates the integrand: s += x * sin(x * pars[0]).
__device__ void f(const float& x, float* pars, float& s) {
    s += x * sinf(x * pars[0]);
}

// Metropolis-Hastings sampler: each thread runs an independent chain of
// N steps with a Gaussian proposal of width *Sigma, averaging f over the
// visited states into result[id]. Expects a 1-D thread block; Pars holds
// one row of *npar parameters per thread.
__global__ void mcmc(curandState* states, unsigned int* num_samples,
                     float* Pars, int* npar, float* Sigma, float* result) {
    int id = threadIdx.x;
    curandState state = states[id];
    unsigned int N = *num_samples;
    float sigma = *Sigma;

    // Per-thread private copy of this chain's parameter row.
    float* pars = new float[*npar];
    memcpy(pars, &Pars[*npar * id], *npar * sizeof(float));

    float xi = curand_uniform(&state);
    float xg = 0.0f;
    float s = 0.0f;
    float p_xi = 0.0f;
    float p_xg = 0.0f;
    pi(xi, pars, p_xi);
    for (unsigned int i = 0; i < N; i++) {
        xg = sigma * curand_normal(&state) + xi;
        pi(xg, pars, p_xg);
        // Metropolis acceptance: move with probability min(1, p_xg / p_xi).
        if (curand_uniform(&state) < (p_xg / p_xi)) {
            xi = xg;
            p_xi = p_xg;
        }
        f(xi, pars, s);
    }
    result[id] = s / float(N);
    delete[] pars;  // BUG FIX: array new[] must pair with delete[], not delete
}

}
.file "tmpxft_000a5e2e_00000000-6_kernel_Levy3D.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2245: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2245: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl pi .type pi, @function pi: .LFB2241: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2241: .size pi, .-pi .globl f .type f, @function f: .LFB2242: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2242: .size f, .-f .globl _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW .type _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW, @function _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW: .LFB2267: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 104(%rsp), %rax subq %fs:40, %rax jne .L12 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 
56(%rsp), %edx leaq init(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2267: .size _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW, .-_Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW .globl init .type init, @function init: .LFB2268: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2268: .size init, .-init .globl _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ .type _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_, @function _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_: .LFB2269: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %r9, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L19 .L15: movq 168(%rsp), %rax subq %fs:40, %rax jne .L20 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq mcmc(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp 
.cfi_def_cfa_offset 192 jmp .L15 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2269: .size _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_, .-_Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ .globl mcmc .type mcmc, @function mcmc: .LFB2270: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2270: .size mcmc, .-mcmc .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "mcmc" .LC1: .string "init" .LC2: .string "precalc_xorwow_matrix" .LC3: .string "precalc_xorwow_offset_matrix" .LC4: .string "mrg32k3aM1" .LC5: .string "mrg32k3aM2" .LC6: .string "mrg32k3aM1SubSeq" .LC7: .string "mrg32k3aM2SubSeq" .LC8: .string "mrg32k3aM1Seq" .LC9: .string "mrg32k3aM2Seq" .LC10: .string "__cr_lgamma_table" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2272: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq mcmc(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq init(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq 
_ZL21precalc_xorwow_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM2(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM1Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM2Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC10(%rip), 
%rdx movq %rdx, %rcx leaq _ZL17__cr_lgamma_table(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2272: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL17__cr_lgamma_table .comm _ZL17__cr_lgamma_table,72,32 .local _ZL13mrg32k3aM2Seq .comm _ZL13mrg32k3aM2Seq,2304,32 .local _ZL13mrg32k3aM1Seq .comm _ZL13mrg32k3aM1Seq,2304,32 .local _ZL16mrg32k3aM2SubSeq .comm _ZL16mrg32k3aM2SubSeq,2016,32 .local _ZL16mrg32k3aM1SubSeq .comm _ZL16mrg32k3aM1SubSeq,2016,32 .local _ZL10mrg32k3aM2 .comm _ZL10mrg32k3aM2,2304,32 .local _ZL10mrg32k3aM1 .comm _ZL10mrg32k3aM1,2304,32 .local _ZL28precalc_xorwow_offset_matrix .comm _ZL28precalc_xorwow_offset_matrix,102400,32 .local _ZL21precalc_xorwow_matrix .comm _ZL21precalc_xorwow_matrix,102400,32 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <curand.h>
#include <curand_kernel.h>

extern "C" {

// Seeds one RNG state per thread. Expects a 1-D thread block; thread i
// initializes state[i] with sequence number i and offset 0.
__global__ void init(unsigned long long int* seed, curandState* state) {
    int id = threadIdx.x;
    curand_init(*seed, id, 0, &state[id]);
}

// Unnormalized target density: p = exp(-|x * pars[2]|^pars[1]).
__device__ void pi(const float& x, float* pars, float& p) {
    p = expf(-powf(fabsf(x * pars[2]), pars[1]));
}

// Accumulates the integrand: s += x * sin(x * pars[0]).
__device__ void f(const float& x, float* pars, float& s) {
    s += x * sinf(x * pars[0]);
}

// Metropolis-Hastings sampler: each thread runs an independent chain of
// N steps with a Gaussian proposal of width *Sigma, averaging f over the
// visited states into result[id]. Expects a 1-D thread block; Pars holds
// one row of *npar parameters per thread.
__global__ void mcmc(curandState* states, unsigned int* num_samples,
                     float* Pars, int* npar, float* Sigma, float* result) {
    int id = threadIdx.x;
    curandState state = states[id];
    unsigned int N = *num_samples;
    float sigma = *Sigma;

    // Per-thread private copy of this chain's parameter row.
    float* pars = new float[*npar];
    memcpy(pars, &Pars[*npar * id], *npar * sizeof(float));

    float xi = curand_uniform(&state);
    float xg = 0.0f;
    float s = 0.0f;
    float p_xi = 0.0f;
    float p_xg = 0.0f;
    pi(xi, pars, p_xi);
    for (unsigned int i = 0; i < N; i++) {
        xg = sigma * curand_normal(&state) + xi;
        pi(xg, pars, p_xg);
        // Metropolis acceptance: move with probability min(1, p_xg / p_xi).
        if (curand_uniform(&state) < (p_xg / p_xi)) {
            xi = xg;
            p_xi = p_xg;
        }
        f(xi, pars, s);
    }
    result[id] = s / float(N);
    delete[] pars;  // BUG FIX: array new[] must pair with delete[], not delete
}

}
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

extern "C" {

// Seeds one RNG state per thread. Expects a 1-D thread block; thread i
// initializes state[i] with sequence number i and offset 0.
__global__ void init(unsigned long long int* seed, hiprandState* state) {
    int id = threadIdx.x;
    hiprand_init(*seed, id, 0, &state[id]);
}

// Unnormalized target density: p = exp(-|x * pars[2]|^pars[1]).
__device__ void pi(const float& x, float* pars, float& p) {
    p = expf(-powf(fabsf(x * pars[2]), pars[1]));
}

// Accumulates the integrand: s += x * sin(x * pars[0]).
__device__ void f(const float& x, float* pars, float& s) {
    s += x * sinf(x * pars[0]);
}

// Metropolis-Hastings sampler: each thread runs an independent chain of
// N steps with a Gaussian proposal of width *Sigma, averaging f over the
// visited states into result[id]. Expects a 1-D thread block; Pars holds
// one row of *npar parameters per thread.
__global__ void mcmc(hiprandState* states, unsigned int* num_samples,
                     float* Pars, int* npar, float* Sigma, float* result) {
    int id = threadIdx.x;
    hiprandState state = states[id];
    unsigned int N = *num_samples;
    float sigma = *Sigma;

    // Per-thread private copy of this chain's parameter row.
    float* pars = new float[*npar];
    memcpy(pars, &Pars[*npar * id], *npar * sizeof(float));

    float xi = hiprand_uniform(&state);
    float xg = 0.0f;
    float s = 0.0f;
    float p_xi = 0.0f;
    float p_xg = 0.0f;
    pi(xi, pars, p_xi);
    for (unsigned int i = 0; i < N; i++) {
        xg = sigma * hiprand_normal(&state) + xi;
        pi(xg, pars, p_xg);
        // Metropolis acceptance: move with probability min(1, p_xg / p_xi).
        if (hiprand_uniform(&state) < (p_xg / p_xi)) {
            xi = xg;
            p_xi = p_xg;
        }
        f(xi, pars, s);
    }
    result[id] = s / float(N);
    delete[] pars;  // BUG FIX: array new[] must pair with delete[], not delete
}

}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

extern "C" {

// Seeds one RNG state per thread. Expects a 1-D thread block; thread i
// initializes state[i] with sequence number i and offset 0.
__global__ void init(unsigned long long int* seed, hiprandState* state) {
    int id = threadIdx.x;
    hiprand_init(*seed, id, 0, &state[id]);
}

// Unnormalized target density: p = exp(-|x * pars[2]|^pars[1]).
__device__ void pi(const float& x, float* pars, float& p) {
    p = expf(-powf(fabsf(x * pars[2]), pars[1]));
}

// Accumulates the integrand: s += x * sin(x * pars[0]).
__device__ void f(const float& x, float* pars, float& s) {
    s += x * sinf(x * pars[0]);
}

// Metropolis-Hastings sampler: each thread runs an independent chain of
// N steps with a Gaussian proposal of width *Sigma, averaging f over the
// visited states into result[id]. Expects a 1-D thread block; Pars holds
// one row of *npar parameters per thread.
__global__ void mcmc(hiprandState* states, unsigned int* num_samples,
                     float* Pars, int* npar, float* Sigma, float* result) {
    int id = threadIdx.x;
    hiprandState state = states[id];
    unsigned int N = *num_samples;
    float sigma = *Sigma;

    // Per-thread private copy of this chain's parameter row.
    float* pars = new float[*npar];
    memcpy(pars, &Pars[*npar * id], *npar * sizeof(float));

    float xi = hiprand_uniform(&state);
    float xg = 0.0f;
    float s = 0.0f;
    float p_xi = 0.0f;
    float p_xg = 0.0f;
    pi(xi, pars, p_xi);
    for (unsigned int i = 0; i < N; i++) {
        xg = sigma * hiprand_normal(&state) + xi;
        pi(xg, pars, p_xg);
        // Metropolis acceptance: move with probability min(1, p_xg / p_xi).
        if (hiprand_uniform(&state) < (p_xg / p_xi)) {
            xi = xg;
            p_xi = p_xg;
        }
        f(xi, pars, s);
    }
    result[id] = s / float(N);
    delete[] pars;  // BUG FIX: array new[] must pair with delete[], not delete
}

}
.text .file "kernel_Levy3D.hip" .globl __device_stub__init # -- Begin function __device_stub__init .p2align 4, 0x90 .type __device_stub__init,@function __device_stub__init: # @__device_stub__init .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $init, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size __device_stub__init, .Lfunc_end0-__device_stub__init .cfi_endproc # -- End function .globl __device_stub__mcmc # -- Begin function __device_stub__mcmc .p2align 4, 0x90 .type __device_stub__mcmc,@function __device_stub__mcmc: # @__device_stub__mcmc .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $mcmc, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end1: .size __device_stub__mcmc, .Lfunc_end1-__device_stub__mcmc .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type 
__hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $init, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $mcmc, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type init,@object # @init .section .rodata,"a",@progbits .globl init .p2align 3, 0x0 init: .quad __device_stub__init .size init, 8 .type mcmc,@object # @mcmc .globl mcmc .p2align 3, 0x0 mcmc: .quad __device_stub__mcmc .size mcmc, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "init" .size .L__unnamed_1, 5 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "mcmc" .size .L__unnamed_2, 5 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__init .addrsig_sym __device_stub__mcmc .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym init .addrsig_sym mcmc .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000a5e2e_00000000-6_kernel_Levy3D.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2245: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2245: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl pi .type pi, @function pi: .LFB2241: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2241: .size pi, .-pi .globl f .type f, @function f: .LFB2242: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2242: .size f, .-f .globl _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW .type _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW, @function _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW: .LFB2267: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 104(%rsp), %rax subq %fs:40, %rax jne .L12 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 
56(%rsp), %edx leaq init(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2267: .size _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW, .-_Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW .globl init .type init, @function init: .LFB2268: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z4initPyP17curandStateXORWOWPyP17curandStateXORWOW addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2268: .size init, .-init .globl _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ .type _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_, @function _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_: .LFB2269: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %r9, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L19 .L15: movq 168(%rsp), %rax subq %fs:40, %rax jne .L20 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq mcmc(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp 
.cfi_def_cfa_offset 192 jmp .L15 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2269: .size _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_, .-_Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ .globl mcmc .type mcmc, @function mcmc: .LFB2270: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z4mcmcP17curandStateXORWOWPjPfPiS2_S2_P17curandStateXORWOWPjPfPiS2_S2_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2270: .size mcmc, .-mcmc .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "mcmc" .LC1: .string "init" .LC2: .string "precalc_xorwow_matrix" .LC3: .string "precalc_xorwow_offset_matrix" .LC4: .string "mrg32k3aM1" .LC5: .string "mrg32k3aM2" .LC6: .string "mrg32k3aM1SubSeq" .LC7: .string "mrg32k3aM2SubSeq" .LC8: .string "mrg32k3aM1Seq" .LC9: .string "mrg32k3aM2Seq" .LC10: .string "__cr_lgamma_table" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2272: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq mcmc(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq init(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq 
_ZL21precalc_xorwow_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM2(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM1Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM2Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC10(%rip), 
%rdx movq %rdx, %rcx leaq _ZL17__cr_lgamma_table(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2272: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL17__cr_lgamma_table .comm _ZL17__cr_lgamma_table,72,32 .local _ZL13mrg32k3aM2Seq .comm _ZL13mrg32k3aM2Seq,2304,32 .local _ZL13mrg32k3aM1Seq .comm _ZL13mrg32k3aM1Seq,2304,32 .local _ZL16mrg32k3aM2SubSeq .comm _ZL16mrg32k3aM2SubSeq,2016,32 .local _ZL16mrg32k3aM1SubSeq .comm _ZL16mrg32k3aM1SubSeq,2016,32 .local _ZL10mrg32k3aM2 .comm _ZL10mrg32k3aM2,2304,32 .local _ZL10mrg32k3aM1 .comm _ZL10mrg32k3aM1,2304,32 .local _ZL28precalc_xorwow_offset_matrix .comm _ZL28precalc_xorwow_offset_matrix,102400,32 .local _ZL21precalc_xorwow_matrix .comm _ZL21precalc_xorwow_matrix,102400,32 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel_Levy3D.hip" .globl __device_stub__init # -- Begin function __device_stub__init .p2align 4, 0x90 .type __device_stub__init,@function __device_stub__init: # @__device_stub__init .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $init, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size __device_stub__init, .Lfunc_end0-__device_stub__init .cfi_endproc # -- End function .globl __device_stub__mcmc # -- Begin function __device_stub__mcmc .p2align 4, 0x90 .type __device_stub__mcmc,@function __device_stub__mcmc: # @__device_stub__mcmc .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $mcmc, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end1: .size __device_stub__mcmc, .Lfunc_end1-__device_stub__mcmc .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type 
__hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $init, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $mcmc, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type init,@object # @init .section .rodata,"a",@progbits .globl init .p2align 3, 0x0 init: .quad __device_stub__init .size init, 8 .type mcmc,@object # @mcmc .globl mcmc .p2align 3, 0x0 mcmc: .quad __device_stub__mcmc .size mcmc, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "init" .size .L__unnamed_1, 5 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "mcmc" .size .L__unnamed_2, 5 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__init .addrsig_sym __device_stub__mcmc .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym init .addrsig_sym mcmc .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <vector> #include <random> #include <time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/fill.h> using std::vector; #define SIZE 10 int main() { thrust::device_vector<float> d_V1(SIZE); thrust::device_vector<float> d_V2(SIZE); thrust::device_vector<float> d_V3(SIZE); thrust::host_vector<float> h_V1(SIZE); thrust::host_vector<float> h_V2(SIZE); thrust::host_vector<float> h_V3(SIZE); thrust::sequence(h_V1.begin(), h_V1.end(), 1); thrust::fill(h_V2.begin(), h_V2.end(), 75); std::cout << "----- V1 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V1[i] << " "; std::cout << std::endl; std::cout << "----- V2 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V2[i] << " "; std::cout << std::endl; d_V1 = h_V1; d_V2 = h_V2; thrust::transform(d_V1.begin(), d_V1.end(), d_V2.begin(), d_V3.begin(), thrust::minus<float>()); thrust::copy(d_V3.begin(), d_V3.end(), h_V3.begin()); std::cout << "----- V3 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V3[i] << " "; std::cout << std::endl; return 0; }
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform18binary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrIfEEEESF_SF_NS9_14no_stencil_tagENS7_5minusIfEENS9_21always_true_predicateEEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R0, P1, R2.reuse, R9, RZ ; /* 0x0000000902007210 */ /* 0x042fe40007f3e0ff */ /*0060*/ IADD3 R8, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002087a10 */ /* 0x000fc60007f1e1ff */ /*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0603 */ /*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */ /* 0x000fe200007fe5ff */ /*0090*/ IMAD.SHL.U32 R2, R0, 0x4, RZ ; /* 0x0000000400027824 */ /* 0x000fe200078e00ff */ /*00a0*/ ISETP.GT.U32.AND P0, PT, R8, 0x1ff, PT ; /* 0x000001ff0800780c */ /* 0x000fe40003f04070 */ /*00b0*/ SHF.L.U64.HI R0, R0, 0x2, R5 ; /* 0x0000000200007819 */ /* 0x000fe40000010205 */ /*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */ /* 0x000fe40003f04300 */ /*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x040fe40007f3e0ff */ /*00e0*/ IADD3 R6, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002067a10 */ /* 0x000fc40007f5e0ff */ /*00f0*/ IADD3 R2, P3, R2, c[0x0][0x178], RZ ; /* 0x00005e0002027a10 */ /* 0x000fe40007f7e0ff */ /*0100*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 
0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*0110*/ IADD3.X R7, R0.reuse, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */ /* 0x040fe400017fe4ff */ /*0120*/ IADD3.X R3, R0, c[0x0][0x17c], RZ, P3, !PT ; /* 0x00005f0000037a10 */ /* 0x000fe20001ffe4ff */ /*0130*/ @P0 BRA 0x270 ; /* 0x0000013000000947 */ /* 0x000fea0003800000 */ /*0140*/ IADD3 R0, R9, 0x100, RZ ; /* 0x0000010009007810 */ /* 0x000fe20007ffe0ff */ /*0150*/ BSSY B0, 0x210 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0160*/ ISETP.GT.U32.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */ /* 0x000fe40003f04070 */ /*0170*/ SHF.R.S32.HI R9, RZ, 0x1f, R8 ; /* 0x0000001fff097819 */ /* 0x000fc40000011408 */ /*0180*/ ISETP.GT.U32.AND P1, PT, R8, R0, PT ; /* 0x000000000800720c */ /* 0x000fe40003f24070 */ /*0190*/ ISETP.GT.AND.EX P0, PT, R9.reuse, RZ, PT, P0 ; /* 0x000000ff0900720c */ /* 0x040fe40003f04300 */ /*01a0*/ ISETP.GT.AND.EX P1, PT, R9, RZ, PT, P1 ; /* 0x000000ff0900720c */ /* 0x000fd60003f24310 */ /*01b0*/ @!P0 BRA 0x200 ; /* 0x0000004000008947 */ /* 0x000fea0003800000 */ /*01c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ /*01d0*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */ /* 0x000ea4000c1e1900 */ /*01e0*/ FADD R9, R0, -R9 ; /* 0x8000000900097221 */ /* 0x004fca0000000000 */ /*01f0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x0001e4000c101904 */ /*0200*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0210*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*0220*/ LDG.E R6, [R6.64+0x400] ; /* 0x0004000406067981 */ /* 0x000ea8000c1e1900 */ /*0230*/ LDG.E R5, [R4.64+0x400] ; /* 0x0004000404057981 */ /* 0x000ea4000c1e1900 */ /*0240*/ FADD R9, -R6, R5 ; /* 0x0000000506097221 */ /* 0x005fca0000000100 */ /*0250*/ STG.E [R2.64+0x400], R9 ; /* 0x0004000902007986 */ /* 0x000fe2000c101904 */ /*0260*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0270*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ 
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */ /* 0x000ea4000c1e1900 */ /*0290*/ FADD R9, R0, -R9 ; /* 0x8000000900097221 */ /* 0x004fca0000000000 */ /*02a0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe8000c101904 */ /*02b0*/ LDG.E R0, [R6.64+0x400] ; /* 0x0004000406007981 */ /* 0x000ea8000c1e1900 */ /*02c0*/ LDG.E R11, [R4.64+0x400] ; /* 0x00040004040b7981 */ /* 0x000ea4000c1e1900 */ /*02d0*/ FADD R11, -R0, R11 ; /* 0x0000000b000b7221 */ /* 0x004fca0000000100 */ /*02e0*/ STG.E [R2.64+0x400], R11 ; /* 0x0004000b02007986 */ /* 0x000fe2000c101904 */ /*02f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0300*/ BRA 0x300; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tEmN6thrust20THRUST_200700_800_NS8cuda_cub20__uninitialized_fill7functorINS7_10device_ptrIfEEfEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R4, P1, -R2.reuse, c[0x0][0x160], RZ ; /* 0x0000580002047a10 */ /* 0x040fe40007f3e1ff */ /*0060*/ IADD3 R0, P2, R2, R5, RZ ; /* 0x0000000502007210 */ /* 0x002fe40007f5e0ff */ /*0070*/ ISETP.GT.U32.AND P0, PT, R4, 0x1ff, PT ; /* 0x000001ff0400780c */ /* 0x000fe40003f04070 */ /*0080*/ IADD3.X R6, ~R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003067a10 */ /* 0x000fe20000ffe5ff */ /*0090*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */ /* 0x000fe200010e0603 */ /*00a0*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */ /* 0x000fe400078210ff */ /*00b0*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fc40003f04100 */ /*00c0*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */ /* 0x000fd600008f1403 */ /*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */ /* 0x000fea0003800000 */ /*00e0*/ ISETP.GT.U32.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */ /* 0x000fe40003f04070 */ /*00f0*/ SHF.R.S32.HI R6, RZ, 0x1f, R4 ; /* 0x0000001fff067819 */ /* 0x000fe40000011404 */ /*0100*/ IADD3 R0, R5, 0x100, RZ ; /* 0x0000010005007810 */ /* 0x000fe40007ffe0ff */ /*0110*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0003f04100 */ /*0120*/ 
@P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff070624 */ /* 0x000fca00078e00ff */ /*0130*/ @P0 STG.E [R2.64], R7 ; /* 0x0000000702000986 */ /* 0x0001e2000c101904 */ /*0140*/ ISETP.GT.U32.AND P0, PT, R4, R0, PT ; /* 0x000000000400720c */ /* 0x000fc80003f04070 */ /*0150*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0003f04100 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */ /* 0x001fca00078e00ff */ /*0180*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */ /* 0x000fe2000c101904 */ /*0190*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */ /* 0x000fca00078e00ff */ /*01b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe8000c101904 */ /*01c0*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */ /* 0x000fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <vector> #include <random> #include <time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/fill.h> using std::vector; #define SIZE 10 int main() { thrust::device_vector<float> d_V1(SIZE); thrust::device_vector<float> d_V2(SIZE); thrust::device_vector<float> d_V3(SIZE); thrust::host_vector<float> h_V1(SIZE); thrust::host_vector<float> h_V2(SIZE); thrust::host_vector<float> h_V3(SIZE); thrust::sequence(h_V1.begin(), h_V1.end(), 1); thrust::fill(h_V2.begin(), h_V2.end(), 75); std::cout << "----- V1 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V1[i] << " "; std::cout << std::endl; std::cout << "----- V2 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V2[i] << " "; std::cout << std::endl; d_V1 = h_V1; d_V2 = h_V2; thrust::transform(d_V1.begin(), d_V1.end(), d_V2.begin(), d_V3.begin(), thrust::minus<float>()); thrust::copy(d_V3.begin(), d_V3.end(), h_V3.begin()); std::cout << "----- V3 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V3[i] << " "; std::cout << std::endl; return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <random> #include <time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/fill.h> using std::vector; #define SIZE 10 int main() { thrust::device_vector<float> d_V1(SIZE); thrust::device_vector<float> d_V2(SIZE); thrust::device_vector<float> d_V3(SIZE); thrust::host_vector<float> h_V1(SIZE); thrust::host_vector<float> h_V2(SIZE); thrust::host_vector<float> h_V3(SIZE); thrust::sequence(h_V1.begin(), h_V1.end(), 1); thrust::fill(h_V2.begin(), h_V2.end(), 75); std::cout << "----- V1 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V1[i] << " "; std::cout << std::endl; std::cout << "----- V2 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V2[i] << " "; std::cout << std::endl; d_V1 = h_V1; d_V2 = h_V2; thrust::transform(d_V1.begin(), d_V1.end(), d_V2.begin(), d_V3.begin(), thrust::minus<float>()); thrust::copy(d_V3.begin(), d_V3.end(), h_V3.begin()); std::cout << "----- V3 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V3[i] << " "; std::cout << std::endl; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <random> #include <time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/fill.h> using std::vector; #define SIZE 10 int main() { thrust::device_vector<float> d_V1(SIZE); thrust::device_vector<float> d_V2(SIZE); thrust::device_vector<float> d_V3(SIZE); thrust::host_vector<float> h_V1(SIZE); thrust::host_vector<float> h_V2(SIZE); thrust::host_vector<float> h_V3(SIZE); thrust::sequence(h_V1.begin(), h_V1.end(), 1); thrust::fill(h_V2.begin(), h_V2.end(), 75); std::cout << "----- V1 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V1[i] << " "; std::cout << std::endl; std::cout << "----- V2 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V2[i] << " "; std::cout << std::endl; d_V1 = h_V1; d_V2 = h_V2; thrust::transform(d_V1.begin(), d_V1.end(), d_V2.begin(), d_V3.begin(), thrust::minus<float>()); thrust::copy(d_V3.begin(), d_V3.end(), h_V3.begin()); std::cout << "----- V3 -----" << std::endl; for(int i = 0; i < SIZE; ++i) std::cout << h_V3[i] << " "; std::cout << std::endl; return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_: s_load_b128 s[4:7], s[0:1], 0x10 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s5, s4 s_cbranch_execz .LBB0_2 s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s6, s[0:1], 0x8 v_lshlrev_b32_e32 v0, 2, v0 s_lshl_b64 s[0:1], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s0, s4, s0 s_addc_u32 s1, s5, s1 v_add_co_u32 v0, s0, s0, v0 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s1, 0, s0 v_mov_b32_e32 v2, s6 flat_store_b32 v[0:1], v2 .LBB0_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel 
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,comdat .Lfunc_end0: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .section .AMDGPU.csdata,"",@progbits .section 
.text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_: s_load_b128 s[4:7], s[0:1], 0x20 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 
s5, s4 s_cbranch_execz .LBB1_2 s_load_b128 s[4:7], s[0:1], 0x0 v_add_co_u32 v0, s2, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s2 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 flat_load_b32 v2, v[2:3] flat_load_b32 v3, v[4:5] v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) lgkmcnt(0) v_sub_f32_e32 v2, v2, v3 flat_store_b32 v[0:1], v2 .LBB1_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 48 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .Lfunc_end1: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end1-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 16 .value_kind: by_value - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 
.max_flat_workgroup_size: 256 .name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 32 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 48 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 256 .name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform18binary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrIfEEEESF_SF_NS9_14no_stencil_tagENS7_5minusIfEENS9_21always_true_predicateEEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R0, P1, R2.reuse, R9, RZ ; /* 0x0000000902007210 */ /* 0x042fe40007f3e0ff */ /*0060*/ IADD3 R8, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002087a10 */ /* 0x000fc60007f1e1ff */ /*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0603 */ /*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */ /* 0x000fe200007fe5ff */ /*0090*/ IMAD.SHL.U32 R2, R0, 0x4, RZ ; /* 0x0000000400027824 */ /* 0x000fe200078e00ff */ /*00a0*/ ISETP.GT.U32.AND P0, PT, R8, 0x1ff, PT ; /* 0x000001ff0800780c */ /* 0x000fe40003f04070 */ /*00b0*/ SHF.L.U64.HI R0, R0, 0x2, R5 ; /* 0x0000000200007819 */ /* 0x000fe40000010205 */ /*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */ /* 0x000fe40003f04300 */ /*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x040fe40007f3e0ff */ /*00e0*/ IADD3 R6, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002067a10 */ /* 0x000fc40007f5e0ff */ /*00f0*/ IADD3 R2, P3, R2, c[0x0][0x178], RZ ; /* 0x00005e0002027a10 */ /* 0x000fe40007f7e0ff */ /*0100*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 
0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*0110*/ IADD3.X R7, R0.reuse, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */ /* 0x040fe400017fe4ff */ /*0120*/ IADD3.X R3, R0, c[0x0][0x17c], RZ, P3, !PT ; /* 0x00005f0000037a10 */ /* 0x000fe20001ffe4ff */ /*0130*/ @P0 BRA 0x270 ; /* 0x0000013000000947 */ /* 0x000fea0003800000 */ /*0140*/ IADD3 R0, R9, 0x100, RZ ; /* 0x0000010009007810 */ /* 0x000fe20007ffe0ff */ /*0150*/ BSSY B0, 0x210 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0160*/ ISETP.GT.U32.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */ /* 0x000fe40003f04070 */ /*0170*/ SHF.R.S32.HI R9, RZ, 0x1f, R8 ; /* 0x0000001fff097819 */ /* 0x000fc40000011408 */ /*0180*/ ISETP.GT.U32.AND P1, PT, R8, R0, PT ; /* 0x000000000800720c */ /* 0x000fe40003f24070 */ /*0190*/ ISETP.GT.AND.EX P0, PT, R9.reuse, RZ, PT, P0 ; /* 0x000000ff0900720c */ /* 0x040fe40003f04300 */ /*01a0*/ ISETP.GT.AND.EX P1, PT, R9, RZ, PT, P1 ; /* 0x000000ff0900720c */ /* 0x000fd60003f24310 */ /*01b0*/ @!P0 BRA 0x200 ; /* 0x0000004000008947 */ /* 0x000fea0003800000 */ /*01c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ /*01d0*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */ /* 0x000ea4000c1e1900 */ /*01e0*/ FADD R9, R0, -R9 ; /* 0x8000000900097221 */ /* 0x004fca0000000000 */ /*01f0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x0001e4000c101904 */ /*0200*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0210*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*0220*/ LDG.E R6, [R6.64+0x400] ; /* 0x0004000406067981 */ /* 0x000ea8000c1e1900 */ /*0230*/ LDG.E R5, [R4.64+0x400] ; /* 0x0004000404057981 */ /* 0x000ea4000c1e1900 */ /*0240*/ FADD R9, -R6, R5 ; /* 0x0000000506097221 */ /* 0x005fca0000000100 */ /*0250*/ STG.E [R2.64+0x400], R9 ; /* 0x0004000902007986 */ /* 0x000fe2000c101904 */ /*0260*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0270*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ 
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */ /* 0x000ea4000c1e1900 */ /*0290*/ FADD R9, R0, -R9 ; /* 0x8000000900097221 */ /* 0x004fca0000000000 */ /*02a0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe8000c101904 */ /*02b0*/ LDG.E R0, [R6.64+0x400] ; /* 0x0004000406007981 */ /* 0x000ea8000c1e1900 */ /*02c0*/ LDG.E R11, [R4.64+0x400] ; /* 0x00040004040b7981 */ /* 0x000ea4000c1e1900 */ /*02d0*/ FADD R11, -R0, R11 ; /* 0x0000000b000b7221 */ /* 0x004fca0000000100 */ /*02e0*/ STG.E [R2.64+0x400], R11 ; /* 0x0004000b02007986 */ /* 0x000fe2000c101904 */ /*02f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0300*/ BRA 0x300; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tEmN6thrust20THRUST_200700_800_NS8cuda_cub20__uninitialized_fill7functorINS7_10device_ptrIfEEfEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R4, P1, -R2.reuse, c[0x0][0x160], RZ ; /* 0x0000580002047a10 */ /* 0x040fe40007f3e1ff */ /*0060*/ IADD3 R0, P2, R2, R5, RZ ; /* 0x0000000502007210 */ /* 0x002fe40007f5e0ff */ /*0070*/ ISETP.GT.U32.AND P0, PT, R4, 0x1ff, PT ; /* 0x000001ff0400780c */ /* 0x000fe40003f04070 */ /*0080*/ IADD3.X R6, ~R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003067a10 */ /* 0x000fe20000ffe5ff */ /*0090*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */ /* 0x000fe200010e0603 */ /*00a0*/ LEA R2, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */ /* 0x000fe400078210ff */ /*00b0*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fc40003f04100 */ /*00c0*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */ /* 0x000fd600008f1403 */ /*00d0*/ @P0 BRA 0x1a0 ; /* 0x000000c000000947 */ /* 0x000fea0003800000 */ /*00e0*/ ISETP.GT.U32.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */ /* 0x000fe40003f04070 */ /*00f0*/ SHF.R.S32.HI R6, RZ, 0x1f, R4 ; /* 0x0000001fff067819 */ /* 0x000fe40000011404 */ /*0100*/ IADD3 R0, R5, 0x100, RZ ; /* 0x0000010005007810 */ /* 0x000fe40007ffe0ff */ /*0110*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0003f04100 */ /*0120*/ 
@P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff070624 */ /* 0x000fca00078e00ff */ /*0130*/ @P0 STG.E [R2.64], R7 ; /* 0x0000000702000986 */ /* 0x0001e2000c101904 */ /*0140*/ ISETP.GT.U32.AND P0, PT, R4, R0, PT ; /* 0x000000000400720c */ /* 0x000fc80003f04070 */ /*0150*/ ISETP.GT.U32.AND.EX P0, PT, R6, RZ, PT, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0003f04100 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */ /* 0x001fca00078e00ff */ /*0180*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */ /* 0x000fe2000c101904 */ /*0190*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff057624 */ /* 0x000fca00078e00ff */ /*01b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe8000c101904 */ /*01c0*/ STG.E [R2.64+0x400], R5 ; /* 0x0004000502007986 */ /* 0x000fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_: s_load_b128 s[4:7], s[0:1], 0x10 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s5, s4 s_cbranch_execz .LBB0_2 s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s6, s[0:1], 0x8 v_lshlrev_b32_e32 v0, 2, v0 s_lshl_b64 s[0:1], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s0, s4, s0 s_addc_u32 s1, s5, s1 v_add_co_u32 v0, s0, s0, v0 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s1, 0, s0 v_mov_b32_e32 v2, s6 flat_store_b32 v[0:1], v2 .LBB0_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel 
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_,comdat .Lfunc_end0: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .section .AMDGPU.csdata,"",@progbits .section 
.text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_: s_load_b128 s[4:7], s[0:1], 0x20 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 
s5, s4 s_cbranch_execz .LBB1_2 s_load_b128 s[4:7], s[0:1], 0x0 v_add_co_u32 v0, s2, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s2 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 flat_load_b32 v2, v[2:3] flat_load_b32 v3, v[4:5] v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) lgkmcnt(0) v_sub_f32_e32 v2, v2, v3 flat_store_b32 v[0:1], v2 .LBB1_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 48 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .Lfunc_end1: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end1-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 16 .value_kind: by_value - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 
.max_flat_workgroup_size: 256 .name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIfEEfEEmLj1EEEvT0_T1_S9_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 32 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 48 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 256 .name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIfEEEES9_S9_NS3_14no_stencil_tagENS_5minusIfEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
.file "tmpxft_00169c71_00000000-6_laba6.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14MultiplicationPfS_S_i .type _Z14MultiplicationPfS_S_i, @function _Z14MultiplicationPfS_S_i: .LFB2057: .cfi_startproc endbr64 testl %ecx, %ecx jle .L11 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %r9 movq %rdx, %r12 movl %ecx, %ebp movslq %ecx, %r11 leaq 0(,%r11,4), %rcx movq %rsi, %r10 addq %rcx, %rsi movl $0, %ebx .L5: movq %r12, %r8 movl $0, %edi .L8: movq %r8, %rdx movq %r10, %rax pxor %xmm1, %xmm1 .L6: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L6 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %r11, %rdi jne .L8 addl $1, %ebx addq %rcx, %r9 addq %rcx, %r10 addq %rcx, %rsi cmpl %ebx, %ebp jne .L5 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L11: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2057: .size _Z14MultiplicationPfS_S_i, .-_Z14MultiplicationPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "%3.0f " .LC2: .string "\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq 
$88, %rsp .cfi_def_cfa_offset 144 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 16(%rsp) movq %rax, %r12 leaq 32(%rax), %rbp movq %rbp, 48(%rsp) .L15: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L15 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 24(%rsp) movq %rax, %rbp leaq 32(%rax), %r13 movq %r13, 64(%rsp) .L16: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %r13, %rbx jne .L16 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 56(%rsp) movq %rax, %r13 leaq 32(%rax), %r14 .L17: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %r14, %rbx jne .L17 movl $128, %edi call malloc@PLT movq %rax, 32(%rsp) movl $128, %edi call malloc@PLT movq %rax, 40(%rsp) movl $128, %edi call malloc@PLT movq %rax, 8(%rsp) movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movq 24(%rsp), %r14 movq 16(%rsp), %r15 movq %rbp, 72(%rsp) movq %r15, %rbp movq 48(%rsp), %r15 .L18: movl $0, %ebx .L19: call rand@PLT movq 0(%rbp), %rcx movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $33, %rdx movl %eax, %esi sarl $31, %esi subl %esi, %edx leal (%rdx,%rdx,4), %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rcx,%rbx) call rand@PLT movq (%r14), %rcx movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %esi sarl $31, %esi subl %esi, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rcx,%rbx) addq $4, %rbx cmpq $16, %rbx jne .L19 addq $8, %rbp addq $8, %r14 cmpq %r15, %rbp jne .L18 movq 72(%rsp), %rbp movq 32(%rsp), %rcx movq 40(%rsp), %rdx movl $0, %esi movq 16(%rsp), %r9 movq 24(%rsp), %r10 .L20: movq (%r9,%rsi,8), %r8 movq (%r10,%rsi,8), %rdi movl $0, %eax .L21: movss (%r8,%rax), %xmm0 movss %xmm0, (%rcx,%rax) movss (%rdi,%rax), %xmm0 movss %xmm0, (%rdx,%rax) addq $4, %rax cmpq $16, %rax jne .L21 addq $1, %rsi addq $16, %rcx addq $16, %rdx cmpq $4, %rsi jne .L20 
movl $4, %ecx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call _Z14MultiplicationPfS_S_i leaq .LC1(%rip), %r14 leaq .LC2(%rip), %r15 movq %rbp, 72(%rsp) movq 48(%rsp), %rbp .L23: movl $0, %ebx .L24: movq (%r12), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movq %r14, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L24 movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %r12 cmpq %rbp, %r12 jne .L23 movq 72(%rsp), %rbp leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC1(%rip), %r12 leaq .LC2(%rip), %r14 movq 64(%rsp), %r15 .L26: movl $0, %ebx .L27: movq 0(%rbp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L27 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rbp cmpq %r15, %rbp jne .L26 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rbp movl $0, %r14d leaq .LC1(%rip), %r12 leaq .LC2(%rip), %r15 .L29: movl $0, %ebx .L30: movss 0(%rbp,%rbx), %xmm0 movq 0(%r13), %rax movss %xmm0, (%rax,%rbx) cvtss2sd %xmm0, %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L30 movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $4, %r14d addq $8, %r13 addq $16, %rbp cmpl $16, %r14d jne .L29 movq 32(%rsp), %rdi call free@PLT movq 40(%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq 16(%rsp), %rdi call free@PLT movq 24(%rsp), %rdi call free@PLT movq 56(%rsp), %rdi call free@PLT movl $0, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
.text .file "laba6.hip" .globl _Z14MultiplicationPfS_S_i # -- Begin function _Z14MultiplicationPfS_S_i .p2align 4, 0x90 .type _Z14MultiplicationPfS_S_i,@function _Z14MultiplicationPfS_S_i: # @_Z14MultiplicationPfS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %ecx, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB0_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 # Child Loop BB0_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rdi,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_3: # %.preheader # Parent Loop BB0_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_4: # Parent Loop BB0_2 Depth=1 # Parent Loop BB0_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB0_4 # %bb.5: # %._crit_edge # in Loop: Header=BB0_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB0_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB0_2 Depth=1 incq %r10 addl %ecx, %r9d cmpq %rax, %r10 jne .LBB0_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB0_8: # %._crit_edge32 retq .Lfunc_end0: .size _Z14MultiplicationPfS_S_i, .Lfunc_end0-_Z14MultiplicationPfS_S_i .cfi_endproc # 
-- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $32, %edi callq malloc movq %rax, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%rbx,%r14,8) incq %r14 cmpq $4, %r14 jne .LBB1_1 # %bb.2: movl $32, %edi callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_3: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%r14,%r15,8) incq %r15 cmpq $4, %r15 jne .LBB1_3 # %bb.4: movl $32, %edi callq malloc movq %rax, %r12 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%r12,%r15,8) incq %r15 cmpq $4, %r15 jne .LBB1_5 # %bb.6: movq %r12, 16(%rsp) # 8-byte Spill movl $128, %edi callq malloc movq %rax, %r15 movl $128, %edi callq malloc movq %rax, %r13 movl $128, %edi callq malloc movq %rax, 8(%rsp) # 8-byte Spill xorl %r12d, %r12d xorl %edi, %edi callq time movl %eax, %edi callq srand .p2align 4, 0x90 .LBB1_7: # %.preheader92 # =>This Loop Header: Depth=1 # Child Loop BB1_8 Depth 2 xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_8: # Parent Loop BB1_7 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $33, %rcx addl %edx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movq (%rbx,%r12,8), %rax movss %xmm0, (%rax,%rbp,4) callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq 
$34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movq (%r14,%r12,8), %rax movss %xmm0, (%rax,%rbp,4) incq %rbp cmpq $4, %rbp jne .LBB1_8 # %bb.9: # in Loop: Header=BB1_7 Depth=1 incq %r12 cmpq $4, %r12 jne .LBB1_7 # %bb.10: # %.preheader90.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB1_11: # %.preheader90 # =>This Inner Loop Header: Depth=1 movq (%rbx,%rax), %rcx movq (%r14,%rax), %rdx movups (%rcx), %xmm0 movups %xmm0, (%r15,%rax,2) movups (%rdx), %xmm0 movups %xmm0, (%r13,%rax,2) addq $8, %rax cmpq $32, %rax jne .LBB1_11 # %bb.12: xorl %eax, %eax movq %r15, %rcx movq 8(%rsp), %rbp # 8-byte Reload .p2align 4, 0x90 .LBB1_13: # %.preheader26.i # =>This Loop Header: Depth=1 # Child Loop BB1_14 Depth 2 # Child Loop BB1_15 Depth 3 movq %rax, %rdx shlq $4, %rdx addq %rbp, %rdx movq %r13, %rsi xorl %edi, %edi .p2align 4, 0x90 .LBB1_14: # %.preheader.i # Parent Loop BB1_13 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_15 Depth 3 xorps %xmm0, %xmm0 xorl %r8d, %r8d .p2align 4, 0x90 .LBB1_15: # Parent Loop BB1_13 Depth=1 # Parent Loop BB1_14 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rcx,%r8), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%rsi,%r8,4), %xmm1 addss %xmm1, %xmm0 addq $4, %r8 cmpq $16, %r8 jne .LBB1_15 # %bb.16: # %._crit_edge.i # in Loop: Header=BB1_14 Depth=2 movss %xmm0, (%rdx,%rdi,4) incq %rdi addq $4, %rsi cmpq $4, %rdi jne .LBB1_14 # %bb.17: # %._crit_edge30.i # in Loop: Header=BB1_13 Depth=1 incq %rax addq $16, %rcx cmpq $4, %rax jne .LBB1_13 # %bb.18: # %.preheader89.preheader movq %r13, 24(%rsp) # 8-byte Spill movq %r15, 32(%rsp) # 8-byte Spill xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_19: # %.preheader89 # =>This Loop Header: Depth=1 # Child Loop BB1_20 Depth 2 movq (%rbx,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_20: # Parent Loop BB1_19 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd 
%xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_20 # %bb.21: # %_Z14MultiplicationPfS_S_i.exit # in Loop: Header=BB1_19 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 cmpq $4, %r12 jne .LBB1_19 # %bb.22: movl $10, %edi callq putchar@PLT xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_23: # %.preheader88 # =>This Loop Header: Depth=1 # Child Loop BB1_24 Depth 2 movq (%r14,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_24: # Parent Loop BB1_23 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_24 # %bb.25: # in Loop: Header=BB1_23 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 cmpq $4, %r12 jne .LBB1_23 # %bb.26: movl $10, %edi callq putchar@PLT xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_27: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_28 Depth 2 movq 16(%rsp), %rax # 8-byte Reload movq (%rax,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_28: # Parent Loop BB1_27 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rbp,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss %xmm0, (%r15,%r13,4) cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_28 # %bb.29: # in Loop: Header=BB1_27 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 addq $16, %rbp cmpq $4, %r12 jne .LBB1_27 # %bb.30: movq 32(%rsp), %rdi # 8-byte Reload callq free movq 24(%rsp), %rdi # 8-byte Reload callq free movq 8(%rsp), %rdi # 8-byte Reload callq free movq %rbx, %rdi callq free movq %r14, %rdi callq free movq 16(%rsp), %rdi # 8-byte Reload callq free xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, 
.Lfunc_end1-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%3.0f " .size .L.str, 8 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00169c71_00000000-6_laba6.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14MultiplicationPfS_S_i .type _Z14MultiplicationPfS_S_i, @function _Z14MultiplicationPfS_S_i: .LFB2057: .cfi_startproc endbr64 testl %ecx, %ecx jle .L11 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %r9 movq %rdx, %r12 movl %ecx, %ebp movslq %ecx, %r11 leaq 0(,%r11,4), %rcx movq %rsi, %r10 addq %rcx, %rsi movl $0, %ebx .L5: movq %r12, %r8 movl $0, %edi .L8: movq %r8, %rdx movq %r10, %rax pxor %xmm1, %xmm1 .L6: movss (%rax), %xmm0 mulss (%rdx), %xmm0 addss %xmm0, %xmm1 addq $4, %rax addq %rcx, %rdx cmpq %rsi, %rax jne .L6 movss %xmm1, (%r9,%rdi,4) addq $1, %rdi addq $4, %r8 cmpq %r11, %rdi jne .L8 addl $1, %ebx addq %rcx, %r9 addq %rcx, %r10 addq %rcx, %rsi cmpl %ebx, %ebp jne .L5 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L11: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 ret .cfi_endproc .LFE2057: .size _Z14MultiplicationPfS_S_i, .-_Z14MultiplicationPfS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "%3.0f " .LC2: .string "\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq 
$88, %rsp .cfi_def_cfa_offset 144 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 16(%rsp) movq %rax, %r12 leaq 32(%rax), %rbp movq %rbp, 48(%rsp) .L15: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L15 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 24(%rsp) movq %rax, %rbp leaq 32(%rax), %r13 movq %r13, 64(%rsp) .L16: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %r13, %rbx jne .L16 movl $32, %edi call malloc@PLT movq %rax, %rbx movq %rax, 56(%rsp) movq %rax, %r13 leaq 32(%rax), %r14 .L17: movl $32, %edi call malloc@PLT movq %rax, (%rbx) addq $8, %rbx cmpq %r14, %rbx jne .L17 movl $128, %edi call malloc@PLT movq %rax, 32(%rsp) movl $128, %edi call malloc@PLT movq %rax, 40(%rsp) movl $128, %edi call malloc@PLT movq %rax, 8(%rsp) movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movq 24(%rsp), %r14 movq 16(%rsp), %r15 movq %rbp, 72(%rsp) movq %r15, %rbp movq 48(%rsp), %r15 .L18: movl $0, %ebx .L19: call rand@PLT movq 0(%rbp), %rcx movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $33, %rdx movl %eax, %esi sarl $31, %esi subl %esi, %edx leal (%rdx,%rdx,4), %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rcx,%rbx) call rand@PLT movq (%r14), %rcx movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %esi sarl $31, %esi subl %esi, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, (%rcx,%rbx) addq $4, %rbx cmpq $16, %rbx jne .L19 addq $8, %rbp addq $8, %r14 cmpq %r15, %rbp jne .L18 movq 72(%rsp), %rbp movq 32(%rsp), %rcx movq 40(%rsp), %rdx movl $0, %esi movq 16(%rsp), %r9 movq 24(%rsp), %r10 .L20: movq (%r9,%rsi,8), %r8 movq (%r10,%rsi,8), %rdi movl $0, %eax .L21: movss (%r8,%rax), %xmm0 movss %xmm0, (%rcx,%rax) movss (%rdi,%rax), %xmm0 movss %xmm0, (%rdx,%rax) addq $4, %rax cmpq $16, %rax jne .L21 addq $1, %rsi addq $16, %rcx addq $16, %rdx cmpq $4, %rsi jne .L20 
movl $4, %ecx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call _Z14MultiplicationPfS_S_i leaq .LC1(%rip), %r14 leaq .LC2(%rip), %r15 movq %rbp, 72(%rsp) movq 48(%rsp), %rbp .L23: movl $0, %ebx .L24: movq (%r12), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movq %r14, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L24 movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %r12 cmpq %rbp, %r12 jne .L23 movq 72(%rsp), %rbp leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC1(%rip), %r12 leaq .LC2(%rip), %r14 movq 64(%rsp), %r15 .L26: movl $0, %ebx .L27: movq 0(%rbp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L27 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rbp cmpq %r15, %rbp jne .L26 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rbp movl $0, %r14d leaq .LC1(%rip), %r12 leaq .LC2(%rip), %r15 .L29: movl $0, %ebx .L30: movss 0(%rbp,%rbx), %xmm0 movq 0(%r13), %rax movss %xmm0, (%rax,%rbx) cvtss2sd %xmm0, %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq $16, %rbx jne .L30 movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $4, %r14d addq $8, %r13 addq $16, %rbp cmpl $16, %r14d jne .L29 movq 32(%rsp), %rdi call free@PLT movq 40(%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq 16(%rsp), %rdi call free@PLT movq 24(%rsp), %rdi call free@PLT movq 56(%rsp), %rdi call free@PLT movl $0, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "laba6.hip" .globl _Z14MultiplicationPfS_S_i # -- Begin function _Z14MultiplicationPfS_S_i .p2align 4, 0x90 .type _Z14MultiplicationPfS_S_i,@function _Z14MultiplicationPfS_S_i: # @_Z14MultiplicationPfS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_8 # %bb.1: # %.preheader26.lr.ph pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %ecx, %eax leaq (,%rax,4), %r8 xorl %r9d, %r9d xorl %r10d, %r10d .p2align 4, 0x90 .LBB0_2: # %.preheader26 # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 # Child Loop BB0_4 Depth 3 movl %r9d, %r11d leaq (%rsi,%r11,4), %r11 movq %r10, %rbx imulq %rax, %rbx leaq (%rdi,%rbx,4), %rbx movq %rdx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_3: # %.preheader # Parent Loop BB0_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_4 Depth 3 xorps %xmm0, %xmm0 movq %r14, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_4: # Parent Loop BB0_2 Depth=1 # Parent Loop BB0_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%r12), %xmm1 addss %xmm1, %xmm0 incq %r13 addq %r8, %r12 cmpq %r13, %rax jne .LBB0_4 # %bb.5: # %._crit_edge # in Loop: Header=BB0_3 Depth=2 movss %xmm0, (%rbx,%r15,4) incq %r15 addq $4, %r14 cmpq %rax, %r15 jne .LBB0_3 # %bb.6: # %._crit_edge30 # in Loop: Header=BB0_2 Depth=1 incq %r10 addl %ecx, %r9d cmpq %rax, %r10 jne .LBB0_2 # %bb.7: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB0_8: # %._crit_edge32 retq .Lfunc_end0: .size _Z14MultiplicationPfS_S_i, .Lfunc_end0-_Z14MultiplicationPfS_S_i .cfi_endproc # 
-- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $32, %edi callq malloc movq %rax, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%rbx,%r14,8) incq %r14 cmpq $4, %r14 jne .LBB1_1 # %bb.2: movl $32, %edi callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_3: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%r14,%r15,8) incq %r15 cmpq $4, %r15 jne .LBB1_3 # %bb.4: movl $32, %edi callq malloc movq %rax, %r12 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movl $32, %edi callq malloc movq %rax, (%r12,%r15,8) incq %r15 cmpq $4, %r15 jne .LBB1_5 # %bb.6: movq %r12, 16(%rsp) # 8-byte Spill movl $128, %edi callq malloc movq %rax, %r15 movl $128, %edi callq malloc movq %rax, %r13 movl $128, %edi callq malloc movq %rax, 8(%rsp) # 8-byte Spill xorl %r12d, %r12d xorl %edi, %edi callq time movl %eax, %edi callq srand .p2align 4, 0x90 .LBB1_7: # %.preheader92 # =>This Loop Header: Depth=1 # Child Loop BB1_8 Depth 2 xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_8: # Parent Loop BB1_7 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $33, %rcx addl %edx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movq (%rbx,%r12,8), %rax movss %xmm0, (%rax,%rbp,4) callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq 
$34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movq (%r14,%r12,8), %rax movss %xmm0, (%rax,%rbp,4) incq %rbp cmpq $4, %rbp jne .LBB1_8 # %bb.9: # in Loop: Header=BB1_7 Depth=1 incq %r12 cmpq $4, %r12 jne .LBB1_7 # %bb.10: # %.preheader90.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB1_11: # %.preheader90 # =>This Inner Loop Header: Depth=1 movq (%rbx,%rax), %rcx movq (%r14,%rax), %rdx movups (%rcx), %xmm0 movups %xmm0, (%r15,%rax,2) movups (%rdx), %xmm0 movups %xmm0, (%r13,%rax,2) addq $8, %rax cmpq $32, %rax jne .LBB1_11 # %bb.12: xorl %eax, %eax movq %r15, %rcx movq 8(%rsp), %rbp # 8-byte Reload .p2align 4, 0x90 .LBB1_13: # %.preheader26.i # =>This Loop Header: Depth=1 # Child Loop BB1_14 Depth 2 # Child Loop BB1_15 Depth 3 movq %rax, %rdx shlq $4, %rdx addq %rbp, %rdx movq %r13, %rsi xorl %edi, %edi .p2align 4, 0x90 .LBB1_14: # %.preheader.i # Parent Loop BB1_13 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_15 Depth 3 xorps %xmm0, %xmm0 xorl %r8d, %r8d .p2align 4, 0x90 .LBB1_15: # Parent Loop BB1_13 Depth=1 # Parent Loop BB1_14 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rcx,%r8), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss (%rsi,%r8,4), %xmm1 addss %xmm1, %xmm0 addq $4, %r8 cmpq $16, %r8 jne .LBB1_15 # %bb.16: # %._crit_edge.i # in Loop: Header=BB1_14 Depth=2 movss %xmm0, (%rdx,%rdi,4) incq %rdi addq $4, %rsi cmpq $4, %rdi jne .LBB1_14 # %bb.17: # %._crit_edge30.i # in Loop: Header=BB1_13 Depth=1 incq %rax addq $16, %rcx cmpq $4, %rax jne .LBB1_13 # %bb.18: # %.preheader89.preheader movq %r13, 24(%rsp) # 8-byte Spill movq %r15, 32(%rsp) # 8-byte Spill xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_19: # %.preheader89 # =>This Loop Header: Depth=1 # Child Loop BB1_20 Depth 2 movq (%rbx,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_20: # Parent Loop BB1_19 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd 
%xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_20 # %bb.21: # %_Z14MultiplicationPfS_S_i.exit # in Loop: Header=BB1_19 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 cmpq $4, %r12 jne .LBB1_19 # %bb.22: movl $10, %edi callq putchar@PLT xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_23: # %.preheader88 # =>This Loop Header: Depth=1 # Child Loop BB1_24 Depth 2 movq (%r14,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_24: # Parent Loop BB1_23 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_24 # %bb.25: # in Loop: Header=BB1_23 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 cmpq $4, %r12 jne .LBB1_23 # %bb.26: movl $10, %edi callq putchar@PLT xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_27: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_28 Depth 2 movq 16(%rsp), %rax # 8-byte Reload movq (%rax,%r12,8), %r15 xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_28: # Parent Loop BB1_27 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rbp,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss %xmm0, (%r15,%r13,4) cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r13 cmpq $4, %r13 jne .LBB1_28 # %bb.29: # in Loop: Header=BB1_27 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 addq $16, %rbp cmpq $4, %r12 jne .LBB1_27 # %bb.30: movq 32(%rsp), %rdi # 8-byte Reload callq free movq 24(%rsp), %rdi # 8-byte Reload callq free movq 8(%rsp), %rdi # 8-byte Reload callq free movq %rbx, %rdi callq free movq %r14, %rdi callq free movq 16(%rsp), %rdi # 8-byte Reload callq free xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, 
.Lfunc_end1-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%3.0f " .size .L.str, 8 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
.file "tmpxft_0019a05e_00000000-6_practica1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8host_addPiS_S_ .type _Z8host_addPiS_S_, @function _Z8host_addPiS_S_: .LFB2057: .cfi_startproc endbr64 movl $0, %eax .L4: movl (%rsi,%rax), %ecx addl (%rdi,%rax), %ecx movl %ecx, (%rdx,%rax) addq $4, %rax cmpq $2048, %rax jne .L4 ret .cfi_endproc .LFE2057: .size _Z8host_addPiS_S_, .-_Z8host_addPiS_S_ .globl _Z10fill_arrayPi .type _Z10fill_arrayPi, @function _Z10fill_arrayPi: .LFB2058: .cfi_startproc endbr64 movl $0, %eax .L7: movl %eax, (%rdi,%rax,4) addq $1, %rax cmpq $512, %rax jne .L7 ret .cfi_endproc .LFE2058: .size _Z10fill_arrayPi, .-_Z10fill_arrayPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\n %d + %d = %d" .LC1: .string "\n" .text .globl _Z12print_outputPiS_S_ .type _Z12print_outputPiS_S_, @function _Z12print_outputPiS_S_: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %r13 movq %rsi, %r12 movq %rdx, %rbp movl $0, %ebx leaq .LC0(%rip), %r14 .L10: movl (%r12,%rbx), %ecx movl 0(%r13,%rbx), %edx movl 0(%rbp,%rbx), %r8d movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq $2048, %rbx jne .L10 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z12print_outputPiS_S_, .-_Z12print_outputPiS_S_ .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl $2048, %edi call malloc@PLT movq %rax, %rbp movq %rax, %rdi call _Z10fill_arrayPi movl $2048, %edi call malloc@PLT movq %rax, %rbx movq %rax, %rdi call _Z10fill_arrayPi movl $2048, %edi call malloc@PLT movq %rax, %r12 movq %rax, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z8host_addPiS_S_ movq %r12, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z12print_outputPiS_S_ movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movl $0, %eax popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
//--------------------------------- // PROGRAMA BASICO EN C //-------------------------------- // Moises Roman Delgadillo Perez // 27/08/2021 //-------------------------------- //Input and output libraries #include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> //Predefinitons #define N 512 #define Real double //-------------------------------- //Array addition function //c = a + b //-------------------------------- void host_add(int* a, int* b, int* c) { for (int idx = 0;idx<N;idx++) c[idx] = a[idx] + b[idx]; } //-------------------------------- //Array filling //-------------------------------- void fill_array(int* data) { for (int idx = 0; idx < N; idx++) data[idx] = idx; } //-------------------------------- //Data output //-------------------------------- void print_output(int* a, int* b, int* c) { for (int idx = 0; idx < N; idx++) printf("\n %d + %d = %d", a[idx], b[idx], c[idx]); printf("\n"); } //-------------------------------- //Main function //-------------------------------- int main(void) { //Array integers int* a, * b, * c; //Array memory size int size = N * sizeof(int); //request and memory filling for arrays a = (int*)malloc(size); fill_array(a); b = (int*)malloc(size); fill_array(b); c = (int*)malloc(size); //Calling function addition host_add(a, b, c); //Writing result in screen print_output(a, b, c); //Memory release free(a); free(b); free(c); //Program success return 0; }
.text .file "practica1.hip" .globl _Z8host_addPiS_S_ # -- Begin function _Z8host_addPiS_S_ .p2align 4, 0x90 .type _Z8host_addPiS_S_,@function _Z8host_addPiS_S_: # @_Z8host_addPiS_S_ .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # =>This Inner Loop Header: Depth=1 movl (%rsi,%rax,4), %ecx addl (%rdi,%rax,4), %ecx movl %ecx, (%rdx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB0_1 # %bb.2: retq .Lfunc_end0: .size _Z8host_addPiS_S_, .Lfunc_end0-_Z8host_addPiS_S_ .cfi_endproc # -- End function .globl _Z10fill_arrayPi # -- Begin function _Z10fill_arrayPi .p2align 4, 0x90 .type _Z10fill_arrayPi,@function _Z10fill_arrayPi: # @_Z10fill_arrayPi .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rdi,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB1_1 # %bb.2: retq .Lfunc_end1: .size _Z10fill_arrayPi, .Lfunc_end1-_Z10fill_arrayPi .cfi_endproc # -- End function .globl _Z12print_outputPiS_S_ # -- Begin function _Z12print_outputPiS_S_ .p2align 4, 0x90 .type _Z12print_outputPiS_S_,@function _Z12print_outputPiS_S_: # @_Z12print_outputPiS_S_ .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl (%r15,%r12,4), %esi movl (%r14,%r12,4), %edx movl (%rbx,%r12,4), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB2_1 # %bb.2: movl $10, %edi addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end2: .size _Z12print_outputPiS_S_, 
.Lfunc_end2-_Z12print_outputPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %rbx xorl %eax, %eax .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_1 # %bb.2: # %_Z10fill_arrayPi.exit movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB3_3: # =>This Inner Loop Header: Depth=1 movl %eax, (%r14,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_3 # %bb.4: # %_Z10fill_arrayPi.exit16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %r15 xorl %eax, %eax .p2align 4, 0x90 .LBB3_5: # =>This Inner Loop Header: Depth=1 movl (%r14,%rax,4), %ecx addl (%rbx,%rax,4), %ecx movl %ecx, (%r15,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_5 # %bb.6: # %_Z8host_addPiS_S_.exit.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB3_7: # %_Z8host_addPiS_S_.exit # =>This Inner Loop Header: Depth=1 movl (%rbx,%r12,4), %esi movl (%r14,%r12,4), %edx movl (%r15,%r12,4), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB3_7 # %bb.8: # %_Z12print_outputPiS_S_.exit movl $10, %edi callq putchar@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section 
.rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\n %d + %d = %d" .size .L.str, 15 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0019a05e_00000000-6_practica1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8host_addPiS_S_ .type _Z8host_addPiS_S_, @function _Z8host_addPiS_S_: .LFB2057: .cfi_startproc endbr64 movl $0, %eax .L4: movl (%rsi,%rax), %ecx addl (%rdi,%rax), %ecx movl %ecx, (%rdx,%rax) addq $4, %rax cmpq $2048, %rax jne .L4 ret .cfi_endproc .LFE2057: .size _Z8host_addPiS_S_, .-_Z8host_addPiS_S_ .globl _Z10fill_arrayPi .type _Z10fill_arrayPi, @function _Z10fill_arrayPi: .LFB2058: .cfi_startproc endbr64 movl $0, %eax .L7: movl %eax, (%rdi,%rax,4) addq $1, %rax cmpq $512, %rax jne .L7 ret .cfi_endproc .LFE2058: .size _Z10fill_arrayPi, .-_Z10fill_arrayPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\n %d + %d = %d" .LC1: .string "\n" .text .globl _Z12print_outputPiS_S_ .type _Z12print_outputPiS_S_, @function _Z12print_outputPiS_S_: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %r13 movq %rsi, %r12 movq %rdx, %rbp movl $0, %ebx leaq .LC0(%rip), %r14 .L10: movl (%r12,%rbx), %ecx movl 0(%r13,%rbx), %edx movl 0(%rbp,%rbx), %r8d movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq $2048, %rbx jne .L10 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z12print_outputPiS_S_, .-_Z12print_outputPiS_S_ .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl $2048, %edi call malloc@PLT movq %rax, %rbp movq %rax, %rdi call _Z10fill_arrayPi movl $2048, %edi call malloc@PLT movq %rax, %rbx movq %rax, %rdi call _Z10fill_arrayPi movl $2048, %edi call malloc@PLT movq %rax, %r12 movq %rax, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z8host_addPiS_S_ movq %r12, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z12print_outputPiS_S_ movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movl $0, %eax popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "practica1.hip" .globl _Z8host_addPiS_S_ # -- Begin function _Z8host_addPiS_S_ .p2align 4, 0x90 .type _Z8host_addPiS_S_,@function _Z8host_addPiS_S_: # @_Z8host_addPiS_S_ .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # =>This Inner Loop Header: Depth=1 movl (%rsi,%rax,4), %ecx addl (%rdi,%rax,4), %ecx movl %ecx, (%rdx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB0_1 # %bb.2: retq .Lfunc_end0: .size _Z8host_addPiS_S_, .Lfunc_end0-_Z8host_addPiS_S_ .cfi_endproc # -- End function .globl _Z10fill_arrayPi # -- Begin function _Z10fill_arrayPi .p2align 4, 0x90 .type _Z10fill_arrayPi,@function _Z10fill_arrayPi: # @_Z10fill_arrayPi .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rdi,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB1_1 # %bb.2: retq .Lfunc_end1: .size _Z10fill_arrayPi, .Lfunc_end1-_Z10fill_arrayPi .cfi_endproc # -- End function .globl _Z12print_outputPiS_S_ # -- Begin function _Z12print_outputPiS_S_ .p2align 4, 0x90 .type _Z12print_outputPiS_S_,@function _Z12print_outputPiS_S_: # @_Z12print_outputPiS_S_ .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl (%r15,%r12,4), %esi movl (%r14,%r12,4), %edx movl (%rbx,%r12,4), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB2_1 # %bb.2: movl $10, %edi addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end2: .size _Z12print_outputPiS_S_, 
.Lfunc_end2-_Z12print_outputPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %rbx xorl %eax, %eax .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_1 # %bb.2: # %_Z10fill_arrayPi.exit movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB3_3: # =>This Inner Loop Header: Depth=1 movl %eax, (%r14,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_3 # %bb.4: # %_Z10fill_arrayPi.exit16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %r15 xorl %eax, %eax .p2align 4, 0x90 .LBB3_5: # =>This Inner Loop Header: Depth=1 movl (%r14,%rax,4), %ecx addl (%rbx,%rax,4), %ecx movl %ecx, (%r15,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB3_5 # %bb.6: # %_Z8host_addPiS_S_.exit.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB3_7: # %_Z8host_addPiS_S_.exit # =>This Inner Loop Header: Depth=1 movl (%rbx,%r12,4), %esi movl (%r14,%r12,4), %edx movl (%r15,%r12,4), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf incq %r12 cmpq $512, %r12 # imm = 0x200 jne .LBB3_7 # %bb.8: # %_Z12print_outputPiS_S_.exit movl $10, %edi callq putchar@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section 
.rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\n %d + %d = %d" .size .L.str, 15 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h"
#include <math.h>

/*
 * CPU reference path: c[i] = sin(a[i]) + sin(b[i]) over `length` elements.
 * NOTE(review): inputs/outputs are double but sinf() (the float routine) is
 * used, so every element is computed at single precision — presumably for
 * parity with the GPU kernels; confirm before changing.
 */
void SimpleSummator(double* a, double* b, double* c, int length){
    for (int idx = 0; idx < length; ++idx)
        c[idx] = sinf(a[idx]) + sinf(b[idx]);
}

/* GPU counterpart of SimpleSummator: one element per thread.
 * No bounds check — callers launch grid*block = count*(length/count) threads,
 * so when `length` is not divisible by the grid size the tail elements of c
 * are simply never written (kept as-is to preserve behavior). */
__global__ void CUDASummator(double* a, double* b, double* c){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]) + sinf(b[gid]);
}

/* result[i] = sinf(a[i]); same one-thread-per-element launch contract as above. */
__global__ void CUDASinusator(double* a, double* result){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    result[gid] = sinf(a[gid]);
}

/* NOTE(review): b and bottomB are accepted but never used — the body currently
 * mirrors CUDASinusator into c. Preserved verbatim; likely an unfinished kernel. */
__global__ void RangeSummator(double* a, double* b, double* c, int bottomB){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]);
}

/*
 * Copies `a` to the device, runs CUDASinusator over a fixed 100-block grid of
 * length/100 threads each, and copies the result back into `result`.
 * Returns the sentinel 9999 unconditionally (no CUDA error propagation).
 */
int GPU_Sinusator(double* a, double* result, int length){
    double* d_in;
    double* d_out;
    const size_t bytes = length * sizeof(double);
    cudaMalloc((void**)&d_in, bytes);
    cudaMalloc((void**)&d_out, bytes);
    cudaMemcpy(d_in, a, bytes, cudaMemcpyHostToDevice);
    CUDASinusator<<<100, length/100>>>(d_in, d_out);
    cudaMemcpy(result, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return 9999;
}

/* Grid size (block count) used by the type==2 path; tuned by the type==3 path. */
int count = 100;

/*
 * Dispatch on `type`:
 *   1 — CPU path (SimpleSummator); returns the sentinel 11111.
 *   2 — GPU path using the global `count` as grid size; returns the measured
 *       event time in milliseconds * 1000, truncated to int. The timing span
 *       includes allocation, copies, and frees, not just the kernel.
 *   3 — autotune: times grid sizes 10..990 (step 10), 4 runs each, keeps the
 *       fastest average in the global `count`, and returns that grid size.
 *   anything else — returns the sentinel 99999.
 */
int GPU_Summator(double* a, double* b, double* c, int length, int type){
    if (type == 1) {
        SimpleSummator(a, b, c, length);
        return 11111;
    }
    const size_t bytes = length * sizeof(double);
    if (type == 2) {
        double *d_a, *d_b, *d_c;
        float ms = 0;
        cudaEvent_t evStart, evStop;
        cudaEventCreate(&evStart);
        cudaEventCreate(&evStop);
        cudaEventRecord(evStart, 0);
        cudaMalloc((void**)&d_a, bytes);
        cudaMalloc((void**)&d_b, bytes);
        cudaMalloc((void**)&d_c, bytes);
        cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
        CUDASummator<<<count, length/count>>>(d_a, d_b, d_c);
        cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        cudaEventRecord(evStop, 0);
        cudaEventSynchronize(evStop);
        cudaEventElapsedTime(&ms, evStart, evStop);
        cudaEventDestroy(evStart);
        cudaEventDestroy(evStop);
        return (int)(ms * 1000);
    }
    if (type == 3) {
        const int repeats = 4;
        float bestTime = 10000.0f;
        /* NOTE(review): tracked as float and truncated back to int below,
         * matching the original's implicit conversions. */
        float bestGrid = 10;
        for (int grid = 10; grid < 1000; grid += 10) {
            float total = 0.0f;
            for (int rep = 0; rep < repeats; ++rep) {
                double *d_a, *d_b, *d_c;
                float ms = 0;
                cudaEvent_t evStart, evStop;
                cudaEventCreate(&evStart);
                cudaEventCreate(&evStop);
                cudaEventRecord(evStart, 0);
                cudaMalloc((void**)&d_a, bytes);
                cudaMalloc((void**)&d_b, bytes);
                cudaMalloc((void**)&d_c, bytes);
                cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
                cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
                CUDASummator<<<grid, length/grid>>>(d_a, d_b, d_c);
                cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
                cudaFree(d_a);
                cudaFree(d_b);
                cudaFree(d_c);
                cudaEventRecord(evStop, 0);
                cudaEventSynchronize(evStop);
                cudaEventElapsedTime(&ms, evStart, evStop);
                cudaEventDestroy(evStart);
                cudaEventDestroy(evStop);
                total += ms;
            }
            if (total / repeats < bestTime) {
                bestTime = total / repeats;
                bestGrid = grid;
            }
        }
        count = bestGrid;
        return bestGrid;
    }
    return 99999;
}
.file "tmpxft_00143a5a_00000000-6_CUDA.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2032: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2032: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14SimpleSummatorPdS_S_i .type _Z14SimpleSummatorPdS_S_i, @function _Z14SimpleSummatorPdS_S_i: .LFB2027: .cfi_startproc endbr64 testl %ecx, %ecx jle .L8 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $16, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r12 movq %rsi, %r13 movq %rdx, %r14 movslq %ecx, %rcx leaq 0(,%rcx,8), %rbp movl $0, %ebx .L5: pxor %xmm0, %xmm0 cvtsd2ss (%r12,%rbx), %xmm0 call sinf@PLT movss %xmm0, 12(%rsp) pxor %xmm0, %xmm0 cvtsd2ss 0(%r13,%rbx), %xmm0 call sinf@PLT addss 12(%rsp), %xmm0 cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%r14,%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L5 addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 ret .cfi_endproc .LFE2027: .size _Z14SimpleSummatorPdS_S_i, .-_Z14SimpleSummatorPdS_S_i .globl _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ .type _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_, @function _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_: .LFB2054: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 
24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12CUDASummatorPdS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2054: .size _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_, .-_Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ .globl _Z12CUDASummatorPdS_S_ .type _Z12CUDASummatorPdS_S_, @function _Z12CUDASummatorPdS_S_: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _Z12CUDASummatorPdS_S_, .-_Z12CUDASummatorPdS_S_ .globl _Z12GPU_SummatorPdS_S_ii .type _Z12GPU_SummatorPdS_S_ii, @function _Z12GPU_SummatorPdS_S_ii: .LFB2029: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %r15 movl %ecx, 20(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $1, %r8d je .L35 cmpl $2, %r8d je .L36 movl $99999, %eax cmpl $3, %r8d je .L37 .L19: 
movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state call _Z14SimpleSummatorPdS_S_i movl $11111, %eax jmp .L19 .L36: movl $0x00000000, 36(%rsp) leaq 64(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 20(%rsp), %ebp movslq %ebp, %rbx salq $3, %rbx leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl count(%rip), %ecx movl %ebp, %eax cltd idivl %ecx movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %ecx, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L23: movl $2, %ecx movq %rbx, %rdx movq 56(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT movq 72(%rsp), %rdi call cudaEventDestroy@PLT movss .LC3(%rip), %xmm0 mulss 36(%rsp), %xmm0 cvttss2sil %xmm0, %eax jmp .L19 .L39: movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ jmp .L23 .L37: 
movslq 20(%rsp), %rbx salq $3, %rbx movl $10, %ebp movss .LC1(%rip), %xmm5 movss %xmm5, 28(%rsp) movss .LC2(%rip), %xmm6 movss %xmm6, 24(%rsp) jmp .L24 .L25: movl $2, %ecx movq %rbx, %rdx movq 56(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT movq 72(%rsp), %rdi call cudaEventDestroy@PLT movss 16(%rsp), %xmm1 addss 36(%rsp), %xmm1 movss %xmm1, 16(%rsp) subl $1, %r12d je .L40 .L26: movl $0x00000000, 36(%rsp) movq 8(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl 20(%rsp), %eax cltd idivl %ebp movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %ebp, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L25 movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ jmp .L25 .L40: movss .LC4(%rip), %xmm0 mulss %xmm1, %xmm0 movss 24(%rsp), %xmm3 comiss %xmm0, %xmm3 jbe .L27 pxor %xmm4, %xmm4 cvtsi2ssl %ebp, %xmm4 movss %xmm4, 28(%rsp) movss %xmm0, 24(%rsp) .L27: addl $10, %ebp cmpl $1000, %ebp je .L29 .L24: movl $4, %r12d movl $0x00000000, 16(%rsp) leaq 64(%rsp), %rax movq %rax, 
8(%rsp) jmp .L26 .L29: cvttss2sil 28(%rsp), %eax movl %eax, count(%rip) jmp .L19 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE2029: .size _Z12GPU_SummatorPdS_S_ii, .-_Z12GPU_SummatorPdS_S_ii .globl _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ .type _Z35__device_stub__Z13CUDASinusatorPdS_PdS_, @function _Z35__device_stub__Z13CUDASinusatorPdS_PdS_: .LFB2056: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L45 .L41: movq 104(%rsp), %rax subq %fs:40, %rax jne .L46 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L45: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z13CUDASinusatorPdS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L41 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2056: .size _Z35__device_stub__Z13CUDASinusatorPdS_PdS_, .-_Z35__device_stub__Z13CUDASinusatorPdS_PdS_ .globl _Z13CUDASinusatorPdS_ .type _Z13CUDASinusatorPdS_, @function _Z13CUDASinusatorPdS_: .LFB2057: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z13CUDASinusatorPdS_, .-_Z13CUDASinusatorPdS_ .globl _Z13GPU_SinusatorPdS_i .type _Z13GPU_SinusatorPdS_i, @function _Z13GPU_SinusatorPdS_i: .LFB2028: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 
.cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %rdi, %r14 movq %rsi, %r12 movl %edx, %r13d movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movslq %edx, %rbx leaq 0(,%rbx,8), %rbp movq %rsp, %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r14, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT imulq $1374389535, %rbx, %rbx sarq $37, %rbx sarl $31, %r13d subl %r13d, %ebx movl %ebx, 28(%rsp) movl $1, 32(%rsp) movl $100, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L50: movl $2, %ecx movq %rbp, %rdx movq 8(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L54 movl $9999, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L53: .cfi_restore_state movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ jmp .L50 .L54: call __stack_chk_fail@PLT .cfi_endproc .LFE2028: .size _Z13GPU_SinusatorPdS_i, .-_Z13GPU_SinusatorPdS_i .globl _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i .type _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i, @function _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i: .LFB2058: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax 
movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L59 .L55: movq 136(%rsp), %rax subq %fs:40, %rax jne .L60 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L59: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13RangeSummatorPdS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L55 .L60: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i, .-_Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i .globl _Z13RangeSummatorPdS_S_i .type _Z13RangeSummatorPdS_S_i, @function _Z13RangeSummatorPdS_S_i: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z13RangeSummatorPdS_S_i, .-_Z13RangeSummatorPdS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string "_Z13RangeSummatorPdS_S_i" .LC6: .string "_Z13CUDASinusatorPdS_" .LC7: .string "_Z12CUDASummatorPdS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z13RangeSummatorPdS_S_i(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z13CUDASinusatorPdS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z12CUDASummatorPdS_S_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl count .data .align 4 .type count, @object .size count, 4 count: .long 100 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC1: .long 1092616192 .align 4 .LC2: .long 1176256512 .align 4 .LC3: .long 1148846080 .align 4 .LC4: .long 1048576000 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h"
#include <math.h>

/*
 * CPU reference path: c[i] = sin(a[i]) + sin(b[i]) over `length` elements.
 * NOTE(review): inputs/outputs are double but sinf() (the float routine) is
 * used, so every element is computed at single precision — presumably for
 * parity with the GPU kernels; confirm before changing.
 */
void SimpleSummator(double* a, double* b, double* c, int length){
    for (int idx = 0; idx < length; ++idx)
        c[idx] = sinf(a[idx]) + sinf(b[idx]);
}

/* GPU counterpart of SimpleSummator: one element per thread.
 * No bounds check — callers launch grid*block = count*(length/count) threads,
 * so when `length` is not divisible by the grid size the tail elements of c
 * are simply never written (kept as-is to preserve behavior). */
__global__ void CUDASummator(double* a, double* b, double* c){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]) + sinf(b[gid]);
}

/* result[i] = sinf(a[i]); same one-thread-per-element launch contract as above. */
__global__ void CUDASinusator(double* a, double* result){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    result[gid] = sinf(a[gid]);
}

/* NOTE(review): b and bottomB are accepted but never used — the body currently
 * mirrors CUDASinusator into c. Preserved verbatim; likely an unfinished kernel. */
__global__ void RangeSummator(double* a, double* b, double* c, int bottomB){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]);
}

/*
 * Copies `a` to the device, runs CUDASinusator over a fixed 100-block grid of
 * length/100 threads each, and copies the result back into `result`.
 * Returns the sentinel 9999 unconditionally (no CUDA error propagation).
 */
int GPU_Sinusator(double* a, double* result, int length){
    double* d_in;
    double* d_out;
    const size_t bytes = length * sizeof(double);
    cudaMalloc((void**)&d_in, bytes);
    cudaMalloc((void**)&d_out, bytes);
    cudaMemcpy(d_in, a, bytes, cudaMemcpyHostToDevice);
    CUDASinusator<<<100, length/100>>>(d_in, d_out);
    cudaMemcpy(result, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return 9999;
}

/* Grid size (block count) used by the type==2 path; tuned by the type==3 path. */
int count = 100;

/*
 * Dispatch on `type`:
 *   1 — CPU path (SimpleSummator); returns the sentinel 11111.
 *   2 — GPU path using the global `count` as grid size; returns the measured
 *       event time in milliseconds * 1000, truncated to int. The timing span
 *       includes allocation, copies, and frees, not just the kernel.
 *   3 — autotune: times grid sizes 10..990 (step 10), 4 runs each, keeps the
 *       fastest average in the global `count`, and returns that grid size.
 *   anything else — returns the sentinel 99999.
 */
int GPU_Summator(double* a, double* b, double* c, int length, int type){
    if (type == 1) {
        SimpleSummator(a, b, c, length);
        return 11111;
    }
    const size_t bytes = length * sizeof(double);
    if (type == 2) {
        double *d_a, *d_b, *d_c;
        float ms = 0;
        cudaEvent_t evStart, evStop;
        cudaEventCreate(&evStart);
        cudaEventCreate(&evStop);
        cudaEventRecord(evStart, 0);
        cudaMalloc((void**)&d_a, bytes);
        cudaMalloc((void**)&d_b, bytes);
        cudaMalloc((void**)&d_c, bytes);
        cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
        CUDASummator<<<count, length/count>>>(d_a, d_b, d_c);
        cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        cudaEventRecord(evStop, 0);
        cudaEventSynchronize(evStop);
        cudaEventElapsedTime(&ms, evStart, evStop);
        cudaEventDestroy(evStart);
        cudaEventDestroy(evStop);
        return (int)(ms * 1000);
    }
    if (type == 3) {
        const int repeats = 4;
        float bestTime = 10000.0f;
        /* NOTE(review): tracked as float and truncated back to int below,
         * matching the original's implicit conversions. */
        float bestGrid = 10;
        for (int grid = 10; grid < 1000; grid += 10) {
            float total = 0.0f;
            for (int rep = 0; rep < repeats; ++rep) {
                double *d_a, *d_b, *d_c;
                float ms = 0;
                cudaEvent_t evStart, evStop;
                cudaEventCreate(&evStart);
                cudaEventCreate(&evStop);
                cudaEventRecord(evStart, 0);
                cudaMalloc((void**)&d_a, bytes);
                cudaMalloc((void**)&d_b, bytes);
                cudaMalloc((void**)&d_c, bytes);
                cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
                cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
                CUDASummator<<<grid, length/grid>>>(d_a, d_b, d_c);
                cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
                cudaFree(d_a);
                cudaFree(d_b);
                cudaFree(d_c);
                cudaEventRecord(evStop, 0);
                cudaEventSynchronize(evStop);
                cudaEventElapsedTime(&ms, evStart, evStop);
                cudaEventDestroy(evStart);
                cudaEventDestroy(evStop);
                total += ms;
            }
            if (total / repeats < bestTime) {
                bestTime = total / repeats;
                bestGrid = grid;
            }
        }
        count = bestGrid;
        return bestGrid;
    }
    return 99999;
}
#include "hip/hip_runtime.h"
#include <math.h>

/*
 * CPU reference path: c[i] = sin(a[i]) + sin(b[i]) over `length` elements.
 * NOTE(review): inputs/outputs are double but sinf() (the float routine) is
 * used, so every element is computed at single precision — presumably for
 * parity with the GPU kernels; confirm before changing.
 */
void SimpleSummator(double* a, double* b, double* c, int length){
    for (int idx = 0; idx < length; ++idx)
        c[idx] = sinf(a[idx]) + sinf(b[idx]);
}

/* GPU counterpart of SimpleSummator: one element per thread.
 * No bounds check — callers launch grid*block = count*(length/count) threads,
 * so when `length` is not divisible by the grid size the tail elements of c
 * are simply never written (kept as-is to preserve behavior). */
__global__ void CUDASummator(double* a, double* b, double* c){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]) + sinf(b[gid]);
}

/* result[i] = sinf(a[i]); same one-thread-per-element launch contract as above. */
__global__ void CUDASinusator(double* a, double* result){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    result[gid] = sinf(a[gid]);
}

/* NOTE(review): b and bottomB are accepted but never used — the body currently
 * mirrors CUDASinusator into c. Preserved verbatim; likely an unfinished kernel. */
__global__ void RangeSummator(double* a, double* b, double* c, int bottomB){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = sinf(a[gid]);
}

/*
 * Copies `a` to the device, runs CUDASinusator over a fixed 100-block grid of
 * length/100 threads each, and copies the result back into `result`.
 * Returns the sentinel 9999 unconditionally (no HIP error propagation).
 */
int GPU_Sinusator(double* a, double* result, int length){
    double* d_in;
    double* d_out;
    const size_t bytes = length * sizeof(double);
    hipMalloc((void**)&d_in, bytes);
    hipMalloc((void**)&d_out, bytes);
    hipMemcpy(d_in, a, bytes, hipMemcpyHostToDevice);
    CUDASinusator<<<100, length/100>>>(d_in, d_out);
    hipMemcpy(result, d_out, bytes, hipMemcpyDeviceToHost);
    hipFree(d_in);
    hipFree(d_out);
    return 9999;
}

/* Grid size (block count) used by the type==2 path; tuned by the type==3 path. */
int count = 100;

/*
 * Dispatch on `type`:
 *   1 — CPU path (SimpleSummator); returns the sentinel 11111.
 *   2 — GPU path using the global `count` as grid size; returns the measured
 *       event time in milliseconds * 1000, truncated to int. The timing span
 *       includes allocation, copies, and frees, not just the kernel.
 *   3 — autotune: times grid sizes 10..990 (step 10), 4 runs each, keeps the
 *       fastest average in the global `count`, and returns that grid size.
 *   anything else — returns the sentinel 99999.
 */
int GPU_Summator(double* a, double* b, double* c, int length, int type){
    if (type == 1) {
        SimpleSummator(a, b, c, length);
        return 11111;
    }
    const size_t bytes = length * sizeof(double);
    if (type == 2) {
        double *d_a, *d_b, *d_c;
        float ms = 0;
        hipEvent_t evStart, evStop;
        hipEventCreate(&evStart);
        hipEventCreate(&evStop);
        hipEventRecord(evStart, 0);
        hipMalloc((void**)&d_a, bytes);
        hipMalloc((void**)&d_b, bytes);
        hipMalloc((void**)&d_c, bytes);
        hipMemcpy(d_a, a, bytes, hipMemcpyHostToDevice);
        hipMemcpy(d_b, b, bytes, hipMemcpyHostToDevice);
        CUDASummator<<<count, length/count>>>(d_a, d_b, d_c);
        hipMemcpy(c, d_c, bytes, hipMemcpyDeviceToHost);
        hipFree(d_a);
        hipFree(d_b);
        hipFree(d_c);
        hipEventRecord(evStop, 0);
        hipEventSynchronize(evStop);
        hipEventElapsedTime(&ms, evStart, evStop);
        hipEventDestroy(evStart);
        hipEventDestroy(evStop);
        return (int)(ms * 1000);
    }
    if (type == 3) {
        const int repeats = 4;
        float bestTime = 10000.0f;
        /* NOTE(review): tracked as float and truncated back to int below,
         * matching the original's implicit conversions. */
        float bestGrid = 10;
        for (int grid = 10; grid < 1000; grid += 10) {
            float total = 0.0f;
            for (int rep = 0; rep < repeats; ++rep) {
                double *d_a, *d_b, *d_c;
                float ms = 0;
                hipEvent_t evStart, evStop;
                hipEventCreate(&evStart);
                hipEventCreate(&evStop);
                hipEventRecord(evStart, 0);
                hipMalloc((void**)&d_a, bytes);
                hipMalloc((void**)&d_b, bytes);
                hipMalloc((void**)&d_c, bytes);
                hipMemcpy(d_a, a, bytes, hipMemcpyHostToDevice);
                hipMemcpy(d_b, b, bytes, hipMemcpyHostToDevice);
                CUDASummator<<<grid, length/grid>>>(d_a, d_b, d_c);
                hipMemcpy(c, d_c, bytes, hipMemcpyDeviceToHost);
                hipFree(d_a);
                hipFree(d_b);
                hipFree(d_c);
                hipEventRecord(evStop, 0);
                hipEventSynchronize(evStop);
                hipEventElapsedTime(&ms, evStart, evStop);
                hipEventDestroy(evStart);
                hipEventDestroy(evStop);
                total += ms;
            }
            if (total / repeats < bestTime) {
                bestTime = total / repeats;
                bestGrid = grid;
            }
        }
        count = bestGrid;
        return bestGrid;
    }
    return 99999;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h"
#include <math.h>

// CPU reference path: c[i] = sinf(a[i]) + sinf(b[i]) for i in [0, length).
// NOTE(review): operands are double but sinf() computes in float, so precision
// is reduced to match the GPU kernels — confirm this is intentional.
void SimpleSummator(double* a, double* b, double* c, int length) {
    for (int idx = 0; idx < length; idx++) {
        c[idx] = sinf(a[idx]) + sinf(b[idx]);
    }
}

// One thread per element: c[i] = sinf(a[i]) + sinf(b[i]).
// NOTE(review): no bounds guard — the launch configuration must guarantee
// gridDim.x * blockDim.x does not exceed the array length.
__global__ void CUDASummator(double* a, double* b, double* c) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    c[gid] = sinf(a[gid]) + sinf(b[gid]);
}

// One thread per element: result[i] = sinf(a[i]); same launch-size
// precondition as CUDASummator.
__global__ void CUDASinusator(double* a, double* result) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    result[gid] = sinf(a[gid]);
}

// NOTE(review): despite its name and signature, this kernel ignores both `b`
// and `bottomB` and only writes c[i] = sinf(a[i]) — looks unfinished; it is
// never launched from this file. Confirm before relying on it.
__global__ void RangeSummator(double* a, double* b, double* c, int bottomB) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    c[gid] = sinf(a[gid]);
}

// Copies `a` to the device, runs CUDASinusator on a <<<100, length/100>>>
// grid, copies the result back, and returns the sentinel value 9999.
// NOTE(review): integer division means the last (length % 100) elements are
// never processed, and no HIP return codes are checked — TODO confirm/fix.
int GPU_Sinusator(double* a, double* result, int length) {
    double* dev_a;
    double* dev_result;
    size_t bytes = length * sizeof(double);

    hipMalloc((void**)&dev_a, bytes);
    hipMalloc((void**)&dev_result, bytes);
    hipMemcpy(dev_a, a, bytes, hipMemcpyHostToDevice);

    CUDASinusator<<<100, length / 100>>>(dev_a, dev_result);

    hipMemcpy(result, dev_result, bytes, hipMemcpyDeviceToHost);
    hipFree(dev_a);
    hipFree(dev_result);
    return 9999;
}

// Block count used by GPU_Summator's type==2 path; overwritten by the
// auto-tuning sweep in the type==3 path.
int count = 100;

// Dispatches on `type`:
//   1 -> CPU SimpleSummator; returns 11111.
//   2 -> one timed GPU run with <<<count, length/count>>>; returns the
//        elapsed time in milliseconds * 1000, truncated to int.
//   3 -> sweeps block counts 10, 20, ..., 990 (repeatC runs each), stores
//        the fastest block count in the global `count`, and returns it.
//   anything else -> 99999.
// NOTE(review): as in GPU_Sinusator, length/count integer division drops the
// tail elements, and HIP return codes are unchecked — TODO confirm/fix.
int GPU_Summator(double* a, double* b, double* c, int length, int type) {
    if (type == 1) {
        SimpleSummator(a, b, c, length);
        return 11111;
    }

    if (type == 2) {
        double* dev_a;
        double* dev_b;
        double* dev_c;
        float elapsed = 0;
        hipEvent_t start, stop;
        size_t bytes = length * sizeof(double);

        hipEventCreate(&start);
        hipEventCreate(&stop);
        hipEventRecord(start, 0);

        hipMalloc((void**)&dev_a, bytes);
        hipMalloc((void**)&dev_b, bytes);
        hipMalloc((void**)&dev_c, bytes);
        hipMemcpy(dev_a, a, bytes, hipMemcpyHostToDevice);
        hipMemcpy(dev_b, b, bytes, hipMemcpyHostToDevice);

        CUDASummator<<<count, length / count>>>(dev_a, dev_b, dev_c);

        hipMemcpy(c, dev_c, bytes, hipMemcpyDeviceToHost);
        hipFree(dev_a);
        hipFree(dev_b);
        hipFree(dev_c);

        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&elapsed, start, stop);
        hipEventDestroy(start);
        hipEventDestroy(stop);

        return (int)(elapsed * 1000);
    }

    int repeatC = 4;
    float el;
    if (type == 3) {
        // bestT/bestC kept as float to preserve the original conversion
        // behavior when the winning block count is stored and returned.
        float bestT = 10000.0;
        float bestC = 10;
        for (int i = 10; i < 1000; i += 10) {
            el = 0.0;
            for (int r = 0; r < repeatC; r++) {
                // Same measured pipeline as the type==2 path, with i blocks.
                double* dev_a;
                double* dev_b;
                double* dev_c;
                float elapsed = 0;
                hipEvent_t start, stop;

                hipEventCreate(&start);
                hipEventCreate(&stop);
                hipEventRecord(start, 0);

                hipMalloc((void**)&dev_a, length * sizeof(double));
                hipMalloc((void**)&dev_b, length * sizeof(double));
                hipMalloc((void**)&dev_c, length * sizeof(double));
                hipMemcpy(dev_a, a, length * sizeof(double), hipMemcpyHostToDevice);
                hipMemcpy(dev_b, b, length * sizeof(double), hipMemcpyHostToDevice);

                CUDASummator<<<i, length / i>>>(dev_a, dev_b, dev_c);

                hipMemcpy(c, dev_c, length * sizeof(double), hipMemcpyDeviceToHost);
                hipFree(dev_a);
                hipFree(dev_b);
                hipFree(dev_c);

                hipEventRecord(stop, 0);
                hipEventSynchronize(stop);
                hipEventElapsedTime(&elapsed, start, stop);
                hipEventDestroy(start);
                hipEventDestroy(stop);

                el += elapsed;
            }
            if (el / repeatC < bestT) {
                bestT = el / repeatC;
                bestC = i;
            }
        }
        count = bestC;
        return bestC;
    }

    return 99999;
}
.text .file "CUDA.hip" .globl _Z14SimpleSummatorPdS_S_i # -- Begin function _Z14SimpleSummatorPdS_S_i .p2align 4, 0x90 .type _Z14SimpleSummatorPdS_S_i,@function _Z14SimpleSummatorPdS_S_i: # @_Z14SimpleSummatorPdS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $16, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl %ecx, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%r15,%r13,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf movss %xmm0, 12(%rsp) # 4-byte Spill movsd (%r14,%r13,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf addss 12(%rsp), %xmm0 # 4-byte Folded Reload cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%rbx,%r13,8) incq %r13 cmpq %r13, %r12 jne .LBB0_2 # %bb.3: addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge retq .Lfunc_end0: .size _Z14SimpleSummatorPdS_S_i, .Lfunc_end0-_Z14SimpleSummatorPdS_S_i .cfi_endproc # -- End function .globl _Z27__device_stub__CUDASummatorPdS_S_ # -- Begin function _Z27__device_stub__CUDASummatorPdS_S_ .p2align 4, 0x90 .type _Z27__device_stub__CUDASummatorPdS_S_,@function _Z27__device_stub__CUDASummatorPdS_S_: # @_Z27__device_stub__CUDASummatorPdS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 
88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12CUDASummatorPdS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z27__device_stub__CUDASummatorPdS_S_, .Lfunc_end1-_Z27__device_stub__CUDASummatorPdS_S_ .cfi_endproc # -- End function .globl _Z28__device_stub__CUDASinusatorPdS_ # -- Begin function _Z28__device_stub__CUDASinusatorPdS_ .p2align 4, 0x90 .type _Z28__device_stub__CUDASinusatorPdS_,@function _Z28__device_stub__CUDASinusatorPdS_: # @_Z28__device_stub__CUDASinusatorPdS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z13CUDASinusatorPdS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z28__device_stub__CUDASinusatorPdS_, .Lfunc_end2-_Z28__device_stub__CUDASinusatorPdS_ .cfi_endproc # -- End function .globl _Z28__device_stub__RangeSummatorPdS_S_i # -- Begin function _Z28__device_stub__RangeSummatorPdS_S_i .p2align 4, 0x90 .type _Z28__device_stub__RangeSummatorPdS_S_i,@function _Z28__device_stub__RangeSummatorPdS_S_i: # @_Z28__device_stub__RangeSummatorPdS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 
64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13RangeSummatorPdS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size _Z28__device_stub__RangeSummatorPdS_S_i, .Lfunc_end3-_Z28__device_stub__RangeSummatorPdS_S_i .cfi_endproc # -- End function .globl _Z13GPU_SinusatorPdS_i # -- Begin function _Z13GPU_SinusatorPdS_i .p2align 4, 0x90 .type _Z13GPU_SinusatorPdS_i,@function _Z13GPU_SinusatorPdS_i: # @_Z13GPU_SinusatorPdS_i .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $104, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rsi, %rbx movq %rdi, %r15 movslq %edx, %r12 leaq (,%r12,8), %r14 leaq 8(%rsp), %rdi movq %r14, %rsi callq hipMalloc movq %rsp, %rdi movq %r14, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r15, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy imulq $1374389535, %r12, %rdx # imm = 0x51EB851F movq %rdx, %rax shrq $63, %rax sarq $37, %rdx addl %eax, %edx movabsq $4294967296, %rdi # imm = 0x100000000 orq %rdi, %rdx orq $100, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx 
movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13CUDASinusatorPdS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq (%rsp), %rsi movq %rbx, %rdi movq %r14, %rdx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movl $9999, %eax # imm = 0x270F addq $104, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z13GPU_SinusatorPdS_i, .Lfunc_end4-_Z13GPU_SinusatorPdS_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z12GPU_SummatorPdS_S_ii .LCPI5_0: .long 0x41200000 # float 10 .LCPI5_1: .long 0x461c4000 # float 1.0E+4 .LCPI5_2: .long 0x3e800000 # float 0.25 .LCPI5_3: .long 0x447a0000 # float 1000 .text .globl _Z12GPU_SummatorPdS_S_ii .p2align 4, 0x90 .type _Z12GPU_SummatorPdS_S_ii,@function _Z12GPU_SummatorPdS_S_ii: # @_Z12GPU_SummatorPdS_S_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, %r15 movq %rsi, %r13 movq %rdi, %rbp cmpl $1, %r8d je .LBB5_10 # %bb.1: cmpl $2, %r8d je .LBB5_14 # %bb.2: movl $99999, %eax # imm = 0x1869F cmpl $3, %r8d jne .LBB5_17 # %bb.3: # %.preheader74 movq %rbp, 176(%rsp) # 8-byte Spill movq %r13, 184(%rsp) # 8-byte Spill movl %ecx, %ebp movslq %ecx, %r12 shlq $3, %r12 movss .LCPI5_0(%rip), %xmm2 # xmm2 = mem[0],zero,zero,zero movss .LCPI5_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero movl $10, %ebx jmp .LBB5_4 .p2align 4, 
0x90 .LBB5_8: # in Loop: Header=BB5_4 Depth=1 cvtsi2ss %ebx, %xmm1 mulss .LCPI5_2(%rip), %xmm3 movaps %xmm3, %xmm0 movss 140(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero cmpltss %xmm2, %xmm0 andps %xmm0, %xmm1 andnps 192(%rsp), %xmm0 # 16-byte Folded Reload orps %xmm1, %xmm0 minss %xmm2, %xmm3 leaq 10(%rbx), %rax movaps %xmm3, %xmm1 movaps %xmm0, %xmm2 cmpq $990, %rbx # imm = 0x3DE movq %rax, %rbx jae .LBB5_9 .LBB5_4: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB5_5 Depth 2 movaps %xmm2, 192(%rsp) # 16-byte Spill movss %xmm1, 140(%rsp) # 4-byte Spill movq %rbx, %r13 movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %r13 xorps %xmm3, %xmm3 movl $4, %r14d jmp .LBB5_5 .p2align 4, 0x90 .LBB5_7: # in Loop: Header=BB5_5 Depth=2 movq 32(%rsp), %rsi movq %r15, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 48(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 60(%rsp), %xmm3 # 4-byte Reload # xmm3 = mem[0],zero,zero,zero addss 12(%rsp), %xmm3 decl %r14d je .LBB5_8 .LBB5_5: # Parent Loop BB5_4 Depth=1 # => This Inner Loop Header: Depth=2 movss %xmm3, 60(%rsp) # 4-byte Spill movl $0, 12(%rsp) leaq 24(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 48(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %r12, %rsi callq hipMalloc movq 48(%rsp), %rdi movq 176(%rsp), %rsi # 8-byte Reload movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 184(%rsp), %rsi # 8-byte Reload movq %r12, %rdx movl $1, %ecx callq hipMemcpy movl %ebp, %eax cltd idivl %ebx 
# kill: def $eax killed $eax def $rax movabsq $4294967296, %rcx # imm = 0x100000000 orq %rcx, %rax movq %r13, %rdi movl $1, %esi movq %rax, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_7 # %bb.6: # in Loop: Header=BB5_5 Depth=2 movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 144(%rsp) leaq 120(%rsp), %rax movq %rax, 152(%rsp) leaq 112(%rsp), %rax movq %rax, 160(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d movl $_Z12CUDASummatorPdS_S_, %edi leaq 144(%rsp), %r9 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB5_7 .LBB5_14: movl $0, 12(%rsp) leaq 24(%rsp), %rdi movl %ecx, %ebx callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movslq %ebx, %r12 shlq $3, %r12 leaq 48(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %r12, %rsi callq hipMalloc movq 48(%rsp), %rdi movq %rbp, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq %r13, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movl count(%rip), %edi movl %ebx, %eax cltd idivl %edi # kill: def $eax killed $eax def $rax movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_16 # %bb.15: movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 144(%rsp) leaq 120(%rsp), %rax movq %rax, 152(%rsp) leaq 
112(%rsp), %rax movq %rax, 160(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 144(%rsp), %r9 movl $_Z12CUDASummatorPdS_S_, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_16: movq 32(%rsp), %rsi movq %r15, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 48(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss .LCPI5_3(%rip), %xmm0 cvttss2si %xmm0, %eax jmp .LBB5_17 .LBB5_10: movl $11111, %eax # imm = 0x2B67 testl %ecx, %ecx jle .LBB5_17 # %bb.11: # %.lr.ph.preheader.i movl %ecx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_12: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 movsd (%rbp,%r14,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf movss %xmm0, 60(%rsp) # 4-byte Spill movsd (%r13,%r14,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf addss 60(%rsp), %xmm0 # 4-byte Folded Reload cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%r15,%r14,8) incq %r14 cmpq %r14, %rbx jne .LBB5_12 # %bb.13: movl $11111, %eax # imm = 0x2B67 jmp .LBB5_17 .LBB5_9: cvttss2si %xmm0, %eax movl %eax, count(%rip) .LBB5_17: # %_Z14SimpleSummatorPdS_S_i.exit addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z12GPU_SummatorPdS_S_ii, 
.Lfunc_end5-_Z12GPU_SummatorPdS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12CUDASummatorPdS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13CUDASinusatorPdS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13RangeSummatorPdS_S_i, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z12CUDASummatorPdS_S_,@object # @_Z12CUDASummatorPdS_S_ .section .rodata,"a",@progbits .globl 
_Z12CUDASummatorPdS_S_ .p2align 3, 0x0 _Z12CUDASummatorPdS_S_: .quad _Z27__device_stub__CUDASummatorPdS_S_ .size _Z12CUDASummatorPdS_S_, 8 .type _Z13CUDASinusatorPdS_,@object # @_Z13CUDASinusatorPdS_ .globl _Z13CUDASinusatorPdS_ .p2align 3, 0x0 _Z13CUDASinusatorPdS_: .quad _Z28__device_stub__CUDASinusatorPdS_ .size _Z13CUDASinusatorPdS_, 8 .type _Z13RangeSummatorPdS_S_i,@object # @_Z13RangeSummatorPdS_S_i .globl _Z13RangeSummatorPdS_S_i .p2align 3, 0x0 _Z13RangeSummatorPdS_S_i: .quad _Z28__device_stub__RangeSummatorPdS_S_i .size _Z13RangeSummatorPdS_S_i, 8 .type count,@object # @count .data .globl count .p2align 2, 0x0 count: .long 100 # 0x64 .size count, 4 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12CUDASummatorPdS_S_" .size .L__unnamed_1, 23 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z13CUDASinusatorPdS_" .size .L__unnamed_2, 22 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z13RangeSummatorPdS_S_i" .size .L__unnamed_3, 25 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__CUDASummatorPdS_S_ .addrsig_sym _Z28__device_stub__CUDASinusatorPdS_ .addrsig_sym _Z28__device_stub__RangeSummatorPdS_S_i .addrsig_sym __hip_module_ctor 
.addrsig_sym __hip_module_dtor .addrsig_sym _Z12CUDASummatorPdS_S_ .addrsig_sym _Z13CUDASinusatorPdS_ .addrsig_sym _Z13RangeSummatorPdS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00143a5a_00000000-6_CUDA.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2032: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2032: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14SimpleSummatorPdS_S_i .type _Z14SimpleSummatorPdS_S_i, @function _Z14SimpleSummatorPdS_S_i: .LFB2027: .cfi_startproc endbr64 testl %ecx, %ecx jle .L8 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $16, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r12 movq %rsi, %r13 movq %rdx, %r14 movslq %ecx, %rcx leaq 0(,%rcx,8), %rbp movl $0, %ebx .L5: pxor %xmm0, %xmm0 cvtsd2ss (%r12,%rbx), %xmm0 call sinf@PLT movss %xmm0, 12(%rsp) pxor %xmm0, %xmm0 cvtsd2ss 0(%r13,%rbx), %xmm0 call sinf@PLT addss 12(%rsp), %xmm0 cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%r14,%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L5 addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 ret .cfi_endproc .LFE2027: .size _Z14SimpleSummatorPdS_S_i, .-_Z14SimpleSummatorPdS_S_i .globl _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ .type _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_, @function _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_: .LFB2054: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 
24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12CUDASummatorPdS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2054: .size _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_, .-_Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ .globl _Z12CUDASummatorPdS_S_ .type _Z12CUDASummatorPdS_S_, @function _Z12CUDASummatorPdS_S_: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _Z12CUDASummatorPdS_S_, .-_Z12CUDASummatorPdS_S_ .globl _Z12GPU_SummatorPdS_S_ii .type _Z12GPU_SummatorPdS_S_ii, @function _Z12GPU_SummatorPdS_S_ii: .LFB2029: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %r15 movl %ecx, 20(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $1, %r8d je .L35 cmpl $2, %r8d je .L36 movl $99999, %eax cmpl $3, %r8d je .L37 .L19: 
movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state call _Z14SimpleSummatorPdS_S_i movl $11111, %eax jmp .L19 .L36: movl $0x00000000, 36(%rsp) leaq 64(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 20(%rsp), %ebp movslq %ebp, %rbx salq $3, %rbx leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl count(%rip), %ecx movl %ebp, %eax cltd idivl %ecx movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %ecx, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L23: movl $2, %ecx movq %rbx, %rdx movq 56(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT movq 72(%rsp), %rdi call cudaEventDestroy@PLT movss .LC3(%rip), %xmm0 mulss 36(%rsp), %xmm0 cvttss2sil %xmm0, %eax jmp .L19 .L39: movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ jmp .L23 .L37: 
movslq 20(%rsp), %rbx salq $3, %rbx movl $10, %ebp movss .LC1(%rip), %xmm5 movss %xmm5, 28(%rsp) movss .LC2(%rip), %xmm6 movss %xmm6, 24(%rsp) jmp .L24 .L25: movl $2, %ecx movq %rbx, %rdx movq 56(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT movq 72(%rsp), %rdi call cudaEventDestroy@PLT movss 16(%rsp), %xmm1 addss 36(%rsp), %xmm1 movss %xmm1, 16(%rsp) subl $1, %r12d je .L40 .L26: movl $0x00000000, 36(%rsp) movq 8(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl 20(%rsp), %eax cltd idivl %ebp movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %ebp, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L25 movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z36__device_stub__Z12CUDASummatorPdS_S_PdS_S_ jmp .L25 .L40: movss .LC4(%rip), %xmm0 mulss %xmm1, %xmm0 movss 24(%rsp), %xmm3 comiss %xmm0, %xmm3 jbe .L27 pxor %xmm4, %xmm4 cvtsi2ssl %ebp, %xmm4 movss %xmm4, 28(%rsp) movss %xmm0, 24(%rsp) .L27: addl $10, %ebp cmpl $1000, %ebp je .L29 .L24: movl $4, %r12d movl $0x00000000, 16(%rsp) leaq 64(%rsp), %rax movq %rax, 
8(%rsp) jmp .L26 .L29: cvttss2sil 28(%rsp), %eax movl %eax, count(%rip) jmp .L19 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE2029: .size _Z12GPU_SummatorPdS_S_ii, .-_Z12GPU_SummatorPdS_S_ii .globl _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ .type _Z35__device_stub__Z13CUDASinusatorPdS_PdS_, @function _Z35__device_stub__Z13CUDASinusatorPdS_PdS_: .LFB2056: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L45 .L41: movq 104(%rsp), %rax subq %fs:40, %rax jne .L46 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L45: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z13CUDASinusatorPdS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L41 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2056: .size _Z35__device_stub__Z13CUDASinusatorPdS_PdS_, .-_Z35__device_stub__Z13CUDASinusatorPdS_PdS_ .globl _Z13CUDASinusatorPdS_ .type _Z13CUDASinusatorPdS_, @function _Z13CUDASinusatorPdS_: .LFB2057: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z13CUDASinusatorPdS_, .-_Z13CUDASinusatorPdS_ .globl _Z13GPU_SinusatorPdS_i .type _Z13GPU_SinusatorPdS_i, @function _Z13GPU_SinusatorPdS_i: .LFB2028: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 
.cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %rdi, %r14 movq %rsi, %r12 movl %edx, %r13d movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movslq %edx, %rbx leaq 0(,%rbx,8), %rbp movq %rsp, %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r14, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT imulq $1374389535, %rbx, %rbx sarq $37, %rbx sarl $31, %r13d subl %r13d, %ebx movl %ebx, 28(%rsp) movl $1, 32(%rsp) movl $100, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L50: movl $2, %ecx movq %rbp, %rdx movq 8(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L54 movl $9999, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L53: .cfi_restore_state movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z35__device_stub__Z13CUDASinusatorPdS_PdS_ jmp .L50 .L54: call __stack_chk_fail@PLT .cfi_endproc .LFE2028: .size _Z13GPU_SinusatorPdS_i, .-_Z13GPU_SinusatorPdS_i .globl _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i .type _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i, @function _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i: .LFB2058: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax 
movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L59 .L55: movq 136(%rsp), %rax subq %fs:40, %rax jne .L60 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L59: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13RangeSummatorPdS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L55 .L60: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i, .-_Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i .globl _Z13RangeSummatorPdS_S_i .type _Z13RangeSummatorPdS_S_i, @function _Z13RangeSummatorPdS_S_i: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z13RangeSummatorPdS_S_iPdS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z13RangeSummatorPdS_S_i, .-_Z13RangeSummatorPdS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string "_Z13RangeSummatorPdS_S_i" .LC6: .string "_Z13CUDASinusatorPdS_" .LC7: .string "_Z12CUDASummatorPdS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z13RangeSummatorPdS_S_i(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z13CUDASinusatorPdS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z12CUDASummatorPdS_S_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl count .data .align 4 .type count, @object .size count, 4 count: .long 100 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC1: .long 1092616192 .align 4 .LC2: .long 1176256512 .align 4 .LC3: .long 1148846080 .align 4 .LC4: .long 1048576000 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "CUDA.hip" .globl _Z14SimpleSummatorPdS_S_i # -- Begin function _Z14SimpleSummatorPdS_S_i .p2align 4, 0x90 .type _Z14SimpleSummatorPdS_S_i,@function _Z14SimpleSummatorPdS_S_i: # @_Z14SimpleSummatorPdS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $16, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl %ecx, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%r15,%r13,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf movss %xmm0, 12(%rsp) # 4-byte Spill movsd (%r14,%r13,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf addss 12(%rsp), %xmm0 # 4-byte Folded Reload cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%rbx,%r13,8) incq %r13 cmpq %r13, %r12 jne .LBB0_2 # %bb.3: addq $16, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r12 .cfi_restore %r13 .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge retq .Lfunc_end0: .size _Z14SimpleSummatorPdS_S_i, .Lfunc_end0-_Z14SimpleSummatorPdS_S_i .cfi_endproc # -- End function .globl _Z27__device_stub__CUDASummatorPdS_S_ # -- Begin function _Z27__device_stub__CUDASummatorPdS_S_ .p2align 4, 0x90 .type _Z27__device_stub__CUDASummatorPdS_S_,@function _Z27__device_stub__CUDASummatorPdS_S_: # @_Z27__device_stub__CUDASummatorPdS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 
88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12CUDASummatorPdS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z27__device_stub__CUDASummatorPdS_S_, .Lfunc_end1-_Z27__device_stub__CUDASummatorPdS_S_ .cfi_endproc # -- End function .globl _Z28__device_stub__CUDASinusatorPdS_ # -- Begin function _Z28__device_stub__CUDASinusatorPdS_ .p2align 4, 0x90 .type _Z28__device_stub__CUDASinusatorPdS_,@function _Z28__device_stub__CUDASinusatorPdS_: # @_Z28__device_stub__CUDASinusatorPdS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z13CUDASinusatorPdS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z28__device_stub__CUDASinusatorPdS_, .Lfunc_end2-_Z28__device_stub__CUDASinusatorPdS_ .cfi_endproc # -- End function .globl _Z28__device_stub__RangeSummatorPdS_S_i # -- Begin function _Z28__device_stub__RangeSummatorPdS_S_i .p2align 4, 0x90 .type _Z28__device_stub__RangeSummatorPdS_S_i,@function _Z28__device_stub__RangeSummatorPdS_S_i: # @_Z28__device_stub__RangeSummatorPdS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 
64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13RangeSummatorPdS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size _Z28__device_stub__RangeSummatorPdS_S_i, .Lfunc_end3-_Z28__device_stub__RangeSummatorPdS_S_i .cfi_endproc # -- End function .globl _Z13GPU_SinusatorPdS_i # -- Begin function _Z13GPU_SinusatorPdS_i .p2align 4, 0x90 .type _Z13GPU_SinusatorPdS_i,@function _Z13GPU_SinusatorPdS_i: # @_Z13GPU_SinusatorPdS_i .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $104, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rsi, %rbx movq %rdi, %r15 movslq %edx, %r12 leaq (,%r12,8), %r14 leaq 8(%rsp), %rdi movq %r14, %rsi callq hipMalloc movq %rsp, %rdi movq %r14, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r15, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy imulq $1374389535, %r12, %rdx # imm = 0x51EB851F movq %rdx, %rax shrq $63, %rax sarq $37, %rdx addl %eax, %edx movabsq $4294967296, %rdi # imm = 0x100000000 orq %rdi, %rdx orq $100, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx 
movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13CUDASinusatorPdS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq (%rsp), %rsi movq %rbx, %rdi movq %r14, %rdx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movl $9999, %eax # imm = 0x270F addq $104, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z13GPU_SinusatorPdS_i, .Lfunc_end4-_Z13GPU_SinusatorPdS_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z12GPU_SummatorPdS_S_ii .LCPI5_0: .long 0x41200000 # float 10 .LCPI5_1: .long 0x461c4000 # float 1.0E+4 .LCPI5_2: .long 0x3e800000 # float 0.25 .LCPI5_3: .long 0x447a0000 # float 1000 .text .globl _Z12GPU_SummatorPdS_S_ii .p2align 4, 0x90 .type _Z12GPU_SummatorPdS_S_ii,@function _Z12GPU_SummatorPdS_S_ii: # @_Z12GPU_SummatorPdS_S_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, %r15 movq %rsi, %r13 movq %rdi, %rbp cmpl $1, %r8d je .LBB5_10 # %bb.1: cmpl $2, %r8d je .LBB5_14 # %bb.2: movl $99999, %eax # imm = 0x1869F cmpl $3, %r8d jne .LBB5_17 # %bb.3: # %.preheader74 movq %rbp, 176(%rsp) # 8-byte Spill movq %r13, 184(%rsp) # 8-byte Spill movl %ecx, %ebp movslq %ecx, %r12 shlq $3, %r12 movss .LCPI5_0(%rip), %xmm2 # xmm2 = mem[0],zero,zero,zero movss .LCPI5_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero movl $10, %ebx jmp .LBB5_4 .p2align 4, 
0x90 .LBB5_8: # in Loop: Header=BB5_4 Depth=1 cvtsi2ss %ebx, %xmm1 mulss .LCPI5_2(%rip), %xmm3 movaps %xmm3, %xmm0 movss 140(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero cmpltss %xmm2, %xmm0 andps %xmm0, %xmm1 andnps 192(%rsp), %xmm0 # 16-byte Folded Reload orps %xmm1, %xmm0 minss %xmm2, %xmm3 leaq 10(%rbx), %rax movaps %xmm3, %xmm1 movaps %xmm0, %xmm2 cmpq $990, %rbx # imm = 0x3DE movq %rax, %rbx jae .LBB5_9 .LBB5_4: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB5_5 Depth 2 movaps %xmm2, 192(%rsp) # 16-byte Spill movss %xmm1, 140(%rsp) # 4-byte Spill movq %rbx, %r13 movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %r13 xorps %xmm3, %xmm3 movl $4, %r14d jmp .LBB5_5 .p2align 4, 0x90 .LBB5_7: # in Loop: Header=BB5_5 Depth=2 movq 32(%rsp), %rsi movq %r15, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 48(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 60(%rsp), %xmm3 # 4-byte Reload # xmm3 = mem[0],zero,zero,zero addss 12(%rsp), %xmm3 decl %r14d je .LBB5_8 .LBB5_5: # Parent Loop BB5_4 Depth=1 # => This Inner Loop Header: Depth=2 movss %xmm3, 60(%rsp) # 4-byte Spill movl $0, 12(%rsp) leaq 24(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 48(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %r12, %rsi callq hipMalloc movq 48(%rsp), %rdi movq 176(%rsp), %rsi # 8-byte Reload movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 184(%rsp), %rsi # 8-byte Reload movq %r12, %rdx movl $1, %ecx callq hipMemcpy movl %ebp, %eax cltd idivl %ebx 
# kill: def $eax killed $eax def $rax movabsq $4294967296, %rcx # imm = 0x100000000 orq %rcx, %rax movq %r13, %rdi movl $1, %esi movq %rax, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_7 # %bb.6: # in Loop: Header=BB5_5 Depth=2 movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 144(%rsp) leaq 120(%rsp), %rax movq %rax, 152(%rsp) leaq 112(%rsp), %rax movq %rax, 160(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d movl $_Z12CUDASummatorPdS_S_, %edi leaq 144(%rsp), %r9 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB5_7 .LBB5_14: movl $0, 12(%rsp) leaq 24(%rsp), %rdi movl %ecx, %ebx callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movslq %ebx, %r12 shlq $3, %r12 leaq 48(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %r12, %rsi callq hipMalloc movq 48(%rsp), %rdi movq %rbp, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq %r13, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movl count(%rip), %edi movl %ebx, %eax cltd idivl %edi # kill: def $eax killed $eax def $rax movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_16 # %bb.15: movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 144(%rsp) leaq 120(%rsp), %rax movq %rax, 152(%rsp) leaq 
112(%rsp), %rax movq %rax, 160(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 144(%rsp), %r9 movl $_Z12CUDASummatorPdS_S_, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_16: movq 32(%rsp), %rsi movq %r15, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 48(%rsp), %rdi callq hipFree movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movq 24(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss .LCPI5_3(%rip), %xmm0 cvttss2si %xmm0, %eax jmp .LBB5_17 .LBB5_10: movl $11111, %eax # imm = 0x2B67 testl %ecx, %ecx jle .LBB5_17 # %bb.11: # %.lr.ph.preheader.i movl %ecx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_12: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 movsd (%rbp,%r14,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf movss %xmm0, 60(%rsp) # 4-byte Spill movsd (%r13,%r14,8), %xmm0 # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 callq sinf addss 60(%rsp), %xmm0 # 4-byte Folded Reload cvtss2sd %xmm0, %xmm0 movsd %xmm0, (%r15,%r14,8) incq %r14 cmpq %r14, %rbx jne .LBB5_12 # %bb.13: movl $11111, %eax # imm = 0x2B67 jmp .LBB5_17 .LBB5_9: cvttss2si %xmm0, %eax movl %eax, count(%rip) .LBB5_17: # %_Z14SimpleSummatorPdS_S_i.exit addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z12GPU_SummatorPdS_S_ii, 
.Lfunc_end5-_Z12GPU_SummatorPdS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12CUDASummatorPdS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13CUDASinusatorPdS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13RangeSummatorPdS_S_i, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z12CUDASummatorPdS_S_,@object # @_Z12CUDASummatorPdS_S_ .section .rodata,"a",@progbits .globl 
_Z12CUDASummatorPdS_S_ .p2align 3, 0x0 _Z12CUDASummatorPdS_S_: .quad _Z27__device_stub__CUDASummatorPdS_S_ .size _Z12CUDASummatorPdS_S_, 8 .type _Z13CUDASinusatorPdS_,@object # @_Z13CUDASinusatorPdS_ .globl _Z13CUDASinusatorPdS_ .p2align 3, 0x0 _Z13CUDASinusatorPdS_: .quad _Z28__device_stub__CUDASinusatorPdS_ .size _Z13CUDASinusatorPdS_, 8 .type _Z13RangeSummatorPdS_S_i,@object # @_Z13RangeSummatorPdS_S_i .globl _Z13RangeSummatorPdS_S_i .p2align 3, 0x0 _Z13RangeSummatorPdS_S_i: .quad _Z28__device_stub__RangeSummatorPdS_S_i .size _Z13RangeSummatorPdS_S_i, 8 .type count,@object # @count .data .globl count .p2align 2, 0x0 count: .long 100 # 0x64 .size count, 4 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12CUDASummatorPdS_S_" .size .L__unnamed_1, 23 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z13CUDASinusatorPdS_" .size .L__unnamed_2, 22 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z13RangeSummatorPdS_S_i" .size .L__unnamed_3, 25 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__CUDASummatorPdS_S_ .addrsig_sym _Z28__device_stub__CUDASinusatorPdS_ .addrsig_sym _Z28__device_stub__RangeSummatorPdS_S_i .addrsig_sym __hip_module_ctor 
.addrsig_sym __hip_module_dtor .addrsig_sym _Z12CUDASummatorPdS_S_ .addrsig_sym _Z13CUDASinusatorPdS_ .addrsig_sym _Z13RangeSummatorPdS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> void bar(); __global__ void fooKernel() { printf("foo!\n"); } int main(){ bar(); fooKernel<<<1,1>>>(); cudaDeviceSynchronize(); std::cout << "foo done!\n"; return 0; }
code for sm_80 Function : _Z9fooKernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */ /* 0x000fe2000001ff00 */ /*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x00006c0000000a00 */ /*0060*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x000fe40000000000 */ /*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */ /* 0x000fe40000000f00 */ /*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */ /* 0x000fe40000000f00 */ /*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x001fc40000000f00 */ /*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x002fea0003c00000 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> void bar(); __global__ void fooKernel() { printf("foo!\n"); } int main(){ bar(); fooKernel<<<1,1>>>(); cudaDeviceSynchronize(); std::cout << "foo done!\n"; return 0; }
.file "tmpxft_000840ed_00000000-6_foo.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z9fooKernelvv .type _Z27__device_stub__Z9fooKernelvv, @function _Z27__device_stub__Z9fooKernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z9fooKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z27__device_stub__Z9fooKernelvv, .-_Z27__device_stub__Z9fooKernelvv .globl _Z9fooKernelv .type _Z9fooKernelv, @function _Z9fooKernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z9fooKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z9fooKernelv, .-_Z9fooKernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "foo done!\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 call _Z3barv@PLT movl $1, 
20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z27__device_stub__Z9fooKernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z9fooKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z9fooKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> void bar(); __global__ void fooKernel() { printf("foo!\n"); } int main(){ bar(); fooKernel<<<1,1>>>(); cudaDeviceSynchronize(); std::cout << "foo done!\n"; return 0; }
#include <hip/hip_runtime.h> #include <iostream> void bar(); __global__ void fooKernel() { printf("foo!\n"); } int main(){ bar(); fooKernel<<<1,1>>>(); hipDeviceSynchronize(); std::cout << "foo done!\n"; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <iostream>

// Supplied by another translation unit; resolved at link time.
void bar();

// Minimal demo kernel: prints "foo!" from the device via printf.
__global__ void fooKernel() {
    printf("foo!\n");
}

// Host driver: call the external bar(), launch fooKernel with a 1x1
// grid/block configuration, wait for device completion, then confirm on stdout.
int main(){
    bar();
    fooKernel<<<1,1>>>();
    hipDeviceSynchronize();
    std::cout << "foo done!\n";
    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9fooKernelv .globl _Z9fooKernelv .p2align 8 .type _Z9fooKernelv,@function _Z9fooKernelv: s_load_b64 s[2:3], s[0:1], 0x50 v_mbcnt_lo_u32_b32 v20, -1, 0 v_mov_b32_e32 v6, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20 v_readfirstlane_b32 s0, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_u32_e64 s0, s0, v4 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_6 v_mov_b32_e32 v0, 0 s_mov_b32 s4, exec_lo s_waitcnt lgkmcnt(0) global_load_b64 v[8:9], v0, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[5:6], v0, s[2:3] s_waitcnt vmcnt(1) v_and_b32_e32 v1, v1, v8 v_and_b32_e32 v2, v2, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v3, v1, 24 v_mul_lo_u32 v2, v2, 24 v_mul_lo_u32 v1, v1, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v2, v3, v2 s_waitcnt vmcnt(0) v_add_co_u32 v1, vcc_lo, v5, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo global_load_b64 v[6:7], v[1:2], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[6:7], v[8:9] s_cbranch_execz .LBB0_5 s_mov_b32 s5, 0 .p2align 6 .LBB0_3: s_sleep 1 s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[10:11], v0, s[2:3] v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v1, v1, v8 v_and_b32_e32 v7, v2, v9 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11] v_mov_b32_e32 v1, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2] v_mov_b32_e32 v6, v2 global_load_b64 v[6:7], v[5:6], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9] s_or_b32 s5, vcc_lo, s5 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s5 s_cbranch_execnz .LBB0_3 s_or_b32 exec_lo, exec_lo, s5 .LBB0_5: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s4 .LBB0_6: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 v_mov_b32_e32 v5, 0 v_readfirstlane_b32 s4, v6 v_readfirstlane_b32 s5, v7 s_mov_b32 s8, exec_lo s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b64 v[8:9], v5, s[2:3] offset:40 global_load_b128 v[0:3], v5, s[2:3] s_waitcnt vmcnt(1) v_readfirstlane_b32 s6, v8 v_readfirstlane_b32 s7, v9 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[6:7], s[4:5], s[6:7] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_8 v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0 s_mul_i32 s8, s7, 24 s_mul_hi_u32 s9, s6, 24 v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1 s_add_i32 s9, s9, s8 s_mul_i32 s8, s6, 24 s_waitcnt vmcnt(0) v_add_co_u32 v10, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo global_store_b128 v[10:11], v[6:9], off offset:8 .LBB0_8: s_or_b32 exec_lo, exec_lo, s1 s_lshl_b64 s[8:9], s[6:7], 12 v_lshlrev_b64 v[4:5], 6, v[4:5] s_waitcnt vmcnt(0) v_add_co_u32 v2, vcc_lo, v2, s8 v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo v_mov_b32_e32 v3, 0 s_mov_b32 s8, 0 s_delay_alu instid0(VALU_DEP_3) v_add_co_u32 v6, vcc_lo, v2, v4 v_mov_b32_e32 v2, 33 s_mov_b32 s9, s8 s_mov_b32 s10, s8 s_mov_b32 s11, s8 v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo v_mov_b32_e32 v4, v3 v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8 v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10 v_mov_b32_e32 v11, s11 s_clause 0x3 global_store_b128 v[6:7], v[2:5], off global_store_b128 v[6:7], v[8:11], off offset:16 global_store_b128 
v[6:7], v[8:11], off offset:32 global_store_b128 v[6:7], v[8:11], off offset:48 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_16 v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4 v_mov_b32_e32 v12, s5 s_clause 0x1 global_load_b64 v[13:14], v10, s[2:3] offset:32 glc global_load_b64 v[2:3], v10, s[2:3] offset:40 s_waitcnt vmcnt(0) v_readfirstlane_b32 s8, v2 v_readfirstlane_b32 s9, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[8:9], s[8:9], s[4:5] s_mul_i32 s9, s9, 24 s_mul_hi_u32 s10, s8, 24 s_mul_i32 s8, s8, 24 s_add_i32 s10, s10, s9 v_add_co_u32 v8, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo s_mov_b32 s8, exec_lo global_store_b64 v[8:9], v[13:14], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[4:5], v[13:14] s_cbranch_execz .LBB0_12 s_mov_b32 s9, 0 .LBB0_11: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 s_sleep 1 global_store_b64 v[8:9], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_or_b32 s9, vcc_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s9 s_cbranch_execnz .LBB0_11 .LBB0_12: s_or_b32 exec_lo, exec_lo, s8 v_mov_b32_e32 v2, 0 s_mov_b32 s9, exec_lo s_mov_b32 s8, exec_lo v_mbcnt_lo_u32_b32 v4, s9, 0 global_load_b64 v[2:3], v2, s[2:3] offset:16 v_cmpx_eq_u32_e32 0, v4 s_cbranch_execz .LBB0_14 s_bcnt1_i32_b32 s9, s9 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9 s_waitcnt vmcnt(0) global_atomic_add_u64 v[2:3], v[4:5], off offset:8 .LBB0_14: s_or_b32 exec_lo, exec_lo, s8 s_waitcnt vmcnt(0) global_load_b64 v[4:5], v[2:3], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] s_cbranch_vccnz .LBB0_16 global_load_b32 v2, v[2:3], off offset:24 v_mov_b32_e32 v3, 0 s_waitcnt 
vmcnt(0) v_readfirstlane_b32 s8, v2 s_waitcnt_vscnt null, 0x0 global_store_b64 v[4:5], v[2:3], off s_and_b32 m0, s8, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_16: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s7, 24 s_mul_hi_u32 s7, s6, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s7, s7, s1 s_mul_i32 s1, s6, 24 v_add_co_u32 v0, vcc_lo, v0, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_20 .p2align 6 .LBB0_17: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_19 s_sleep 1 s_cbranch_execnz .LBB0_20 s_branch .LBB0_22 .p2align 6 .LBB0_19: s_branch .LBB0_22 .LBB0_20: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_17 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_17 .LBB0_22: global_load_b64 v[22:23], v[6:7], off s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_26 v_mov_b32_e32 v6, 0 s_clause 0x2 global_load_b64 v[2:3], v6, s[2:3] offset:40 global_load_b64 v[7:8], v6, s[2:3] offset:24 glc global_load_b64 v[4:5], v6, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v9, vcc_lo, v2, 1 v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v9, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9 v_and_b32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v2, v0, v2 v_mul_lo_u32 v3, v3, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) 
v_mul_hi_u32 v9, v2, 24 v_mul_lo_u32 v2, v2, 24 v_add_nc_u32_e32 v3, v9, v3 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v4, vcc_lo, v4, v2 v_mov_b32_e32 v2, v7 v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo v_mov_b32_e32 v3, v8 global_store_b64 v[4:5], v[7:8], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_26 s_mov_b32 s0, 0 .LBB0_25: s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_25 .LBB0_26: s_or_b32 exec_lo, exec_lo, s1 s_getpc_b64 s[4:5] s_add_u32 s4, s4, .str@rel32@lo+4 s_addc_u32 s5, s5, .str@rel32@hi+12 s_mov_b32 s0, -1 s_cmp_lg_u64 s[4:5], 0 s_cbranch_scc0 .LBB0_105 s_waitcnt vmcnt(0) v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22 v_mov_b32_e32 v25, 0 s_mov_b64 s[6:7], 6 s_branch .LBB0_29 .LBB0_28: s_or_b32 exec_lo, exec_lo, s1 s_sub_u32 s6, s6, s8 s_subb_u32 s7, s7, s9 s_add_u32 s4, s4, s8 s_addc_u32 s5, s5, s9 s_cmp_lg_u64 s[6:7], 0 s_cbranch_scc0 .LBB0_104 .LBB0_29: v_cmp_lt_u64_e64 s0, s[6:7], 56 s_delay_alu instid0(VALU_DEP_1) s_and_b32 s0, s0, exec_lo s_cselect_b32 s8, s6, 56 s_cselect_b32 s9, s7, 0 s_cmp_gt_u32 s8, 7 s_mov_b32 s0, -1 s_cbranch_scc1 .LBB0_34 v_mov_b32_e32 v2, 0 v_mov_b32_e32 v3, 0 s_cmp_eq_u32 s8, 0 s_cbranch_scc1 .LBB0_33 s_lshl_b64 s[0:1], s[8:9], 3 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], s[4:5] .LBB0_32: global_load_u8 v4, v25, s[12:13] s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v4 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b64 v[4:5], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, 
s11, 0 s_add_u32 s12, s12, 1 s_addc_u32 s13, s13, 0 s_cmp_lg_u32 s0, s10 v_or_b32_e32 v2, v4, v2 v_or_b32_e32 v3, v5, v3 s_cbranch_scc1 .LBB0_32 .LBB0_33: s_mov_b32 s0, 0 s_mov_b32 s15, 0 .LBB0_34: s_and_not1_b32 vcc_lo, exec_lo, s0 s_mov_b64 s[0:1], s[4:5] s_cbranch_vccnz .LBB0_36 global_load_b64 v[2:3], v25, s[4:5] s_add_i32 s15, s8, -8 s_add_u32 s0, s4, 8 s_addc_u32 s1, s5, 0 .LBB0_36: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_41 v_mov_b32_e32 v4, 0 v_mov_b32_e32 v5, 0 s_cmp_eq_u32 s15, 0 s_cbranch_scc1 .LBB0_40 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_39: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v6, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v4, v6, v4 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v5, v7, v5 s_cbranch_scc1 .LBB0_39 .LBB0_40: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_42 s_branch .LBB0_43 .LBB0_41: .LBB0_42: global_load_b64 v[4:5], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_43: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_48 v_mov_b32_e32 v6, 0 v_mov_b32_e32 v7, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_47 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_46: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v8, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[8:9], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s14, s12 v_or_b32_e32 v6, v8, v6 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v7, v9, v7 s_cbranch_scc1 .LBB0_46 .LBB0_47: s_mov_b32 s15, 0 s_cbranch_execz .LBB0_49 s_branch 
.LBB0_50 .LBB0_48: .LBB0_49: global_load_b64 v[6:7], v25, s[0:1] s_add_i32 s15, s14, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_50: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_55 v_mov_b32_e32 v8, 0 v_mov_b32_e32 v9, 0 s_cmp_eq_u32 s15, 0 s_cbranch_scc1 .LBB0_54 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_53: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v10, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[10:11], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v8, v10, v8 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v9, v11, v9 s_cbranch_scc1 .LBB0_53 .LBB0_54: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_56 s_branch .LBB0_57 .LBB0_55: .LBB0_56: global_load_b64 v[8:9], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_57: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_62 v_mov_b32_e32 v10, 0 v_mov_b32_e32 v11, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_61 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_60: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v12, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[12:13], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s14, s12 v_or_b32_e32 v10, v12, v10 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v11, v13, v11 s_cbranch_scc1 .LBB0_60 .LBB0_61: s_mov_b32 s15, 0 s_cbranch_execz .LBB0_63 s_branch .LBB0_64 .LBB0_62: .LBB0_63: global_load_b64 v[10:11], v25, s[0:1] s_add_i32 s15, s14, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_64: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_69 v_mov_b32_e32 v12, 0 v_mov_b32_e32 v13, 0 s_cmp_eq_u32 
s15, 0 s_cbranch_scc1 .LBB0_68 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_67: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v14, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[14:15], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v12, v14, v12 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v13, v15, v13 s_cbranch_scc1 .LBB0_67 .LBB0_68: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_70 s_branch .LBB0_71 .LBB0_69: .LBB0_70: global_load_b64 v[12:13], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_71: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_76 v_mov_b32_e32 v14, 0 v_mov_b32_e32 v15, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_75 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], s[0:1] .LBB0_74: global_load_u8 v16, v25, s[12:13] s_add_i32 s14, s14, -1 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v16 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b64 v[16:17], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_add_u32 s12, s12, 1 s_addc_u32 s13, s13, 0 s_cmp_lg_u32 s14, 0 v_or_b32_e32 v14, v16, v14 v_or_b32_e32 v15, v17, v15 s_cbranch_scc1 .LBB0_74 .LBB0_75: s_cbranch_execz .LBB0_77 s_branch .LBB0_78 .LBB0_76: .LBB0_77: global_load_b64 v[14:15], v25, s[0:1] .LBB0_78: v_mov_b32_e32 v24, v20 v_mov_b32_e32 v26, 0 v_mov_b32_e32 v27, 0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s0, v24 v_cmp_eq_u32_e64 s0, s0, v24 s_delay_alu instid0(VALU_DEP_1) s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_84 global_load_b64 v[18:19], v25, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[16:17], v25, s[2:3] offset:40 global_load_b64 v[26:27], v25, s[2:3] s_mov_b32 s10, exec_lo s_waitcnt vmcnt(1) v_and_b32_e32 v17, 
v17, v19 v_and_b32_e32 v16, v16, v18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v17, v17, 24 v_mul_hi_u32 v21, v16, 24 v_mul_lo_u32 v16, v16, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v17, v21, v17 s_waitcnt vmcnt(0) v_add_co_u32 v16, vcc_lo, v26, v16 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo global_load_b64 v[16:17], v[16:17], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[26:27], v[18:19] s_cbranch_execz .LBB0_83 s_mov_b32 s11, 0 .p2align 6 .LBB0_81: s_sleep 1 s_clause 0x1 global_load_b64 v[16:17], v25, s[2:3] offset:40 global_load_b64 v[28:29], v25, s[2:3] v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_and_b32_e32 v16, v16, v18 s_waitcnt vmcnt(0) v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19 v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17] global_load_b64 v[16:17], v[26:27], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19] s_or_b32 s11, vcc_lo, s11 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s11 s_cbranch_execnz .LBB0_81 s_or_b32 exec_lo, exec_lo, s11 .LBB0_83: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s10 .LBB0_84: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 s_clause 0x1 global_load_b64 v[28:29], v25, s[2:3] offset:40 global_load_b128 v[16:19], v25, s[2:3] v_readfirstlane_b32 s10, v26 v_readfirstlane_b32 s11, v27 s_mov_b32 s14, exec_lo s_waitcnt vmcnt(1) v_readfirstlane_b32 s12, v28 
v_readfirstlane_b32 s13, v29 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[12:13], s[10:11], s[12:13] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_86 v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0 s_mul_i32 s14, s13, 24 s_mul_hi_u32 s15, s12, 24 v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1 s_add_i32 s15, s15, s14 s_mul_i32 s14, s12, 24 s_waitcnt vmcnt(0) v_add_co_u32 v30, vcc_lo, v16, s14 v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo global_store_b128 v[30:31], v[26:29], off offset:8 .LBB0_86: s_or_b32 exec_lo, exec_lo, s1 v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56 v_or_b32_e32 v21, 2, v0 s_lshl_b64 s[14:15], s[12:13], 12 v_lshlrev_b64 v[26:27], 6, v[24:25] s_lshl_b32 s1, s8, 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_i32 s1, s1, 28 v_cndmask_b32_e32 v0, v21, v0, vcc_lo s_waitcnt vmcnt(0) v_add_co_u32 v18, vcc_lo, v18, s14 v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo s_and_b32 s1, s1, 0x1e0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v18, vcc_lo, v18, v26 v_and_or_b32 v0, v0, 0xffffff1f, s1 v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo s_clause 0x3 global_store_b128 v[18:19], v[0:3], off global_store_b128 v[18:19], v[4:7], off offset:16 global_store_b128 v[18:19], v[8:11], off offset:32 global_store_b128 v[18:19], v[12:15], off offset:48 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_94 s_clause 0x1 global_load_b64 v[8:9], v25, s[2:3] offset:32 glc global_load_b64 v[0:1], v25, s[2:3] offset:40 v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 s_waitcnt vmcnt(0) v_readfirstlane_b32 s14, v0 v_readfirstlane_b32 s15, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[14:15], s[14:15], s[10:11] s_mul_i32 s15, s15, 24 s_mul_hi_u32 s16, s14, 24 s_mul_i32 s14, s14, 24 s_add_i32 s16, s16, s15 v_add_co_u32 v4, vcc_lo, v16, s14 v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo s_mov_b32 s14, exec_lo global_store_b64 v[4:5], v[8:9], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 
v[2:3], v25, v[6:9], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[2:3], v[8:9] s_cbranch_execz .LBB0_90 s_mov_b32 s15, 0 .LBB0_89: v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11 s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3] v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 s_or_b32 s15, vcc_lo, s15 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s15 s_cbranch_execnz .LBB0_89 .LBB0_90: s_or_b32 exec_lo, exec_lo, s14 global_load_b64 v[0:1], v25, s[2:3] offset:16 s_mov_b32 s15, exec_lo s_mov_b32 s14, exec_lo v_mbcnt_lo_u32_b32 v2, s15, 0 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v2 s_cbranch_execz .LBB0_92 s_bcnt1_i32_b32 s15, s15 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15 s_waitcnt vmcnt(0) global_atomic_add_u64 v[0:1], v[2:3], off offset:8 .LBB0_92: s_or_b32 exec_lo, exec_lo, s14 s_waitcnt vmcnt(0) global_load_b64 v[2:3], v[0:1], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3] s_cbranch_vccnz .LBB0_94 global_load_b32 v24, v[0:1], off offset:24 s_waitcnt vmcnt(0) v_readfirstlane_b32 s14, v24 s_waitcnt_vscnt null, 0x0 global_store_b64 v[2:3], v[24:25], off s_and_b32 m0, s14, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_94: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s13, 24 s_mul_hi_u32 s13, s12, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s13, s13, s1 s_mul_i32 s1, s12, 24 v_add_co_u32 v0, vcc_lo, v16, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_98 .p2align 6 .LBB0_95: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 
s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_97 s_sleep 1 s_cbranch_execnz .LBB0_98 s_branch .LBB0_100 .p2align 6 .LBB0_97: s_branch .LBB0_100 .LBB0_98: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_95 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_95 .LBB0_100: global_load_b64 v[0:1], v[18:19], off s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_28 s_clause 0x2 global_load_b64 v[4:5], v25, s[2:3] offset:40 global_load_b64 v[8:9], v25, s[2:3] offset:24 glc global_load_b64 v[6:7], v25, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v10, vcc_lo, v4, 1 v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, v10, s10 v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3] v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10 v_and_b32_e32 v5, v3, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_and_b32_e32 v4, v2, v4 v_mul_hi_u32 v10, v4, 24 v_mul_lo_u32 v4, v4, 24 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_u32 v6, vcc_lo, v6, v4 v_mov_b32_e32 v4, v8 v_mul_lo_u32 v5, v5, 24 v_add_nc_u32_e32 v5, v10, v5 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo v_mov_b32_e32 v5, v9 global_store_b64 v[6:7], v[8:9], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_28 s_mov_b32 s0, 0 .LBB0_103: s_sleep 1 global_store_b64 v[6:7], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5] v_dual_mov_b32 v4, v8 :: 
v_dual_mov_b32 v5, v9 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_103 s_branch .LBB0_28 .LBB0_104: s_mov_b32 s0, 0 .LBB0_105: s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 vcc_lo, exec_lo, s0 s_cbranch_vccz .LBB0_132 v_readfirstlane_b32 s0, v20 v_mov_b32_e32 v4, 0 v_mov_b32_e32 v5, 0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_u32_e64 s0, s0, v20 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_112 s_waitcnt vmcnt(0) v_mov_b32_e32 v0, 0 s_mov_b32 s4, exec_lo global_load_b64 v[6:7], v0, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[3:4], v0, s[2:3] s_waitcnt vmcnt(1) v_and_b32_e32 v1, v1, v6 v_and_b32_e32 v2, v2, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v5, v1, 24 v_mul_lo_u32 v2, v2, 24 v_mul_lo_u32 v1, v1, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v2, v5, v2 s_waitcnt vmcnt(0) v_add_co_u32 v1, vcc_lo, v3, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo global_load_b64 v[4:5], v[1:2], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[4:5], v[6:7] s_cbranch_execz .LBB0_111 s_mov_b32 s5, 0 .p2align 6 .LBB0_109: s_sleep 1 s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[8:9], v0, s[2:3] v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_and_b32_e32 v1, v1, v6 s_waitcnt vmcnt(0) v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7 v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2] global_load_b64 v[4:5], 
v[3:4], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7] s_or_b32 s5, vcc_lo, s5 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s5 s_cbranch_execnz .LBB0_109 s_or_b32 exec_lo, exec_lo, s5 .LBB0_111: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s4 .LBB0_112: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 v_mov_b32_e32 v21, 0 v_readfirstlane_b32 s4, v4 v_readfirstlane_b32 s5, v5 s_mov_b32 s8, exec_lo s_clause 0x1 global_load_b64 v[6:7], v21, s[2:3] offset:40 global_load_b128 v[0:3], v21, s[2:3] s_waitcnt vmcnt(1) v_readfirstlane_b32 s6, v6 v_readfirstlane_b32 s7, v7 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[6:7], s[4:5], s[6:7] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_114 v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0 s_mul_i32 s8, s7, 24 s_mul_hi_u32 s9, s6, 24 v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1 s_add_i32 s9, s9, s8 s_mul_i32 s8, s6, 24 s_waitcnt vmcnt(0) v_add_co_u32 v8, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo global_store_b128 v[8:9], v[4:7], off offset:8 .LBB0_114: s_or_b32 exec_lo, exec_lo, s1 s_lshl_b64 s[8:9], s[6:7], 12 v_and_or_b32 v22, v22, 0xffffff1d, 34 s_waitcnt vmcnt(0) v_add_co_u32 v4, vcc_lo, v2, s8 v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo v_lshlrev_b64 v[2:3], 6, v[20:21] s_mov_b32 s8, 0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_mov_b32 s9, s8 s_mov_b32 s10, s8 s_mov_b32 s11, s8 v_add_co_u32 v8, vcc_lo, v4, v2 v_mov_b32_e32 v6, 0 v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11 v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10 s_delay_alu instid0(VALU_DEP_4) v_mov_b32_e32 v7, v6 s_clause 0x4 global_store_b64 v[8:9], v[22:23], off global_store_b128 v[8:9], v[2:5], off offset:8 global_store_b128 v[8:9], v[2:5], off offset:24 global_store_b128 
v[8:9], v[2:5], off offset:40 global_store_b64 v[8:9], v[6:7], off offset:56 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_122 v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4 v_mov_b32_e32 v10, s5 s_clause 0x1 global_load_b64 v[11:12], v8, s[2:3] offset:32 glc global_load_b64 v[2:3], v8, s[2:3] offset:40 s_waitcnt vmcnt(0) v_readfirstlane_b32 s8, v2 v_readfirstlane_b32 s9, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[8:9], s[8:9], s[4:5] s_mul_i32 s9, s9, 24 s_mul_hi_u32 s10, s8, 24 s_mul_i32 s8, s8, 24 s_add_i32 s10, s10, s9 v_add_co_u32 v6, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo s_mov_b32 s8, exec_lo global_store_b64 v[6:7], v[11:12], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[4:5], v[11:12] s_cbranch_execz .LBB0_118 s_mov_b32 s9, 0 .LBB0_117: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 s_sleep 1 global_store_b64 v[6:7], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_or_b32 s9, vcc_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s9 s_cbranch_execnz .LBB0_117 .LBB0_118: s_or_b32 exec_lo, exec_lo, s8 v_mov_b32_e32 v2, 0 s_mov_b32 s9, exec_lo s_mov_b32 s8, exec_lo v_mbcnt_lo_u32_b32 v4, s9, 0 global_load_b64 v[2:3], v2, s[2:3] offset:16 v_cmpx_eq_u32_e32 0, v4 s_cbranch_execz .LBB0_120 s_bcnt1_i32_b32 s9, s9 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9 s_waitcnt vmcnt(0) global_atomic_add_u64 v[2:3], v[4:5], off offset:8 .LBB0_120: s_or_b32 exec_lo, exec_lo, s8 s_waitcnt vmcnt(0) global_load_b64 v[4:5], v[2:3], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] s_cbranch_vccnz .LBB0_122 global_load_b32 v2, v[2:3], off offset:24 v_mov_b32_e32 v3, 0 s_waitcnt vmcnt(0) 
v_readfirstlane_b32 s8, v2 s_waitcnt_vscnt null, 0x0 global_store_b64 v[4:5], v[2:3], off s_and_b32 m0, s8, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_122: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s7, 24 s_mul_hi_u32 s7, s6, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s7, s7, s1 s_mul_i32 s1, s6, 24 v_add_co_u32 v0, vcc_lo, v0, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_126 .p2align 6 .LBB0_123: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_125 s_sleep 1 s_cbranch_execnz .LBB0_126 s_branch .LBB0_128 .p2align 6 .LBB0_125: s_branch .LBB0_128 .LBB0_126: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_123 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_123 .LBB0_128: s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_132 v_mov_b32_e32 v6, 0 s_clause 0x2 global_load_b64 v[2:3], v6, s[2:3] offset:40 global_load_b64 v[7:8], v6, s[2:3] offset:24 glc global_load_b64 v[4:5], v6, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v9, vcc_lo, v2, 1 v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v9, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9 v_and_b32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v2, v0, v2 v_mul_lo_u32 v3, v3, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_hi_u32 v9, v2, 24 v_mul_lo_u32 
v2, v2, 24 v_add_nc_u32_e32 v3, v9, v3 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v4, vcc_lo, v4, v2 v_mov_b32_e32 v2, v7 v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo v_mov_b32_e32 v3, v8 global_store_b64 v[4:5], v[7:8], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_132 s_mov_b32 s0, 0 .LBB0_131: s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_131 .LBB0_132: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9fooKernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 256 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 32 .amdhsa_next_free_sgpr 18 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9fooKernelv, .Lfunc_end0-_Z9fooKernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type .str,@object .section .rodata.str1.1,"aMS",@progbits,1 .str: .asciz "foo!\n" .size .str, 6 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: hidden_block_count_x - .offset: 4 .size: 4 .value_kind: hidden_block_count_y - .offset: 8 .size: 4 .value_kind: hidden_block_count_z - .offset: 12 .size: 2 .value_kind: hidden_group_size_x - .offset: 14 .size: 2 .value_kind: hidden_group_size_y - .offset: 16 .size: 2 .value_kind: hidden_group_size_z - .offset: 18 .size: 2 .value_kind: hidden_remainder_x - .offset: 20 .size: 2 .value_kind: hidden_remainder_y - .offset: 22 .size: 2 .value_kind: hidden_remainder_z - .offset: 40 .size: 8 .value_kind: hidden_global_offset_x - .offset: 48 .size: 8 .value_kind: hidden_global_offset_y - .offset: 56 .size: 8 .value_kind: hidden_global_offset_z - .offset: 64 .size: 2 .value_kind: hidden_grid_dims - .offset: 80 .size: 8 .value_kind: hidden_hostcall_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 256 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9fooKernelv .private_segment_fixed_size: 0 .sgpr_count: 20 .sgpr_spill_count: 0 .symbol: _Z9fooKernelv.kd .uniform_work_group_size: 1 
.uses_dynamic_stack: false .vgpr_count: 32 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> void bar(); __global__ void fooKernel() { printf("foo!\n"); } int main(){ bar(); fooKernel<<<1,1>>>(); hipDeviceSynchronize(); std::cout << "foo done!\n"; return 0; }
.text .file "foo.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z24__device_stub__fooKernelv # -- Begin function _Z24__device_stub__fooKernelv .p2align 4, 0x90 .type _Z24__device_stub__fooKernelv,@function _Z24__device_stub__fooKernelv: # @_Z24__device_stub__fooKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9fooKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z24__device_stub__fooKernelv, .Lfunc_end0-_Z24__device_stub__fooKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 callq _Z3barv movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9fooKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize movl $_ZSt4cout, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9fooKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9fooKernelv,@object # @_Z9fooKernelv .section .rodata,"a",@progbits .globl _Z9fooKernelv .p2align 3, 0x0 _Z9fooKernelv: .quad _Z24__device_stub__fooKernelv .size _Z9fooKernelv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "foo done!\n" .size .L.str, 11 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9fooKernelv" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section 
.init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__fooKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9fooKernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9fooKernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */ /* 0x000fe2000001ff00 */ /*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x00006c0000000a00 */ /*0060*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x000fe40000000000 */ /*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */ /* 0x000fe40000000f00 */ /*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */ /* 0x000fe40000000f00 */ /*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x001fc40000000f00 */ /*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x002fea0003c00000 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9fooKernelv .globl _Z9fooKernelv .p2align 8 .type _Z9fooKernelv,@function _Z9fooKernelv: s_load_b64 s[2:3], s[0:1], 0x50 v_mbcnt_lo_u32_b32 v20, -1, 0 v_mov_b32_e32 v6, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20 v_readfirstlane_b32 s0, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_u32_e64 s0, s0, v4 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_6 v_mov_b32_e32 v0, 0 s_mov_b32 s4, exec_lo s_waitcnt lgkmcnt(0) global_load_b64 v[8:9], v0, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[5:6], v0, s[2:3] s_waitcnt vmcnt(1) v_and_b32_e32 v1, v1, v8 v_and_b32_e32 v2, v2, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v3, v1, 24 v_mul_lo_u32 v2, v2, 24 v_mul_lo_u32 v1, v1, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v2, v3, v2 s_waitcnt vmcnt(0) v_add_co_u32 v1, vcc_lo, v5, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo global_load_b64 v[6:7], v[1:2], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[6:7], v[8:9] s_cbranch_execz .LBB0_5 s_mov_b32 s5, 0 .p2align 6 .LBB0_3: s_sleep 1 s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[10:11], v0, s[2:3] v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v1, v1, v8 v_and_b32_e32 v7, v2, v9 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11] v_mov_b32_e32 v1, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2] v_mov_b32_e32 v6, v2 global_load_b64 v[6:7], v[5:6], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9] s_or_b32 s5, vcc_lo, s5 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s5 s_cbranch_execnz .LBB0_3 s_or_b32 exec_lo, exec_lo, s5 .LBB0_5: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s4 .LBB0_6: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 v_mov_b32_e32 v5, 0 v_readfirstlane_b32 s4, v6 v_readfirstlane_b32 s5, v7 s_mov_b32 s8, exec_lo s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b64 v[8:9], v5, s[2:3] offset:40 global_load_b128 v[0:3], v5, s[2:3] s_waitcnt vmcnt(1) v_readfirstlane_b32 s6, v8 v_readfirstlane_b32 s7, v9 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[6:7], s[4:5], s[6:7] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_8 v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0 s_mul_i32 s8, s7, 24 s_mul_hi_u32 s9, s6, 24 v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1 s_add_i32 s9, s9, s8 s_mul_i32 s8, s6, 24 s_waitcnt vmcnt(0) v_add_co_u32 v10, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo global_store_b128 v[10:11], v[6:9], off offset:8 .LBB0_8: s_or_b32 exec_lo, exec_lo, s1 s_lshl_b64 s[8:9], s[6:7], 12 v_lshlrev_b64 v[4:5], 6, v[4:5] s_waitcnt vmcnt(0) v_add_co_u32 v2, vcc_lo, v2, s8 v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo v_mov_b32_e32 v3, 0 s_mov_b32 s8, 0 s_delay_alu instid0(VALU_DEP_3) v_add_co_u32 v6, vcc_lo, v2, v4 v_mov_b32_e32 v2, 33 s_mov_b32 s9, s8 s_mov_b32 s10, s8 s_mov_b32 s11, s8 v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo v_mov_b32_e32 v4, v3 v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8 v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10 v_mov_b32_e32 v11, s11 s_clause 0x3 global_store_b128 v[6:7], v[2:5], off global_store_b128 v[6:7], v[8:11], off offset:16 global_store_b128 
v[6:7], v[8:11], off offset:32 global_store_b128 v[6:7], v[8:11], off offset:48 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_16 v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4 v_mov_b32_e32 v12, s5 s_clause 0x1 global_load_b64 v[13:14], v10, s[2:3] offset:32 glc global_load_b64 v[2:3], v10, s[2:3] offset:40 s_waitcnt vmcnt(0) v_readfirstlane_b32 s8, v2 v_readfirstlane_b32 s9, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[8:9], s[8:9], s[4:5] s_mul_i32 s9, s9, 24 s_mul_hi_u32 s10, s8, 24 s_mul_i32 s8, s8, 24 s_add_i32 s10, s10, s9 v_add_co_u32 v8, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo s_mov_b32 s8, exec_lo global_store_b64 v[8:9], v[13:14], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[4:5], v[13:14] s_cbranch_execz .LBB0_12 s_mov_b32 s9, 0 .LBB0_11: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 s_sleep 1 global_store_b64 v[8:9], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_or_b32 s9, vcc_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s9 s_cbranch_execnz .LBB0_11 .LBB0_12: s_or_b32 exec_lo, exec_lo, s8 v_mov_b32_e32 v2, 0 s_mov_b32 s9, exec_lo s_mov_b32 s8, exec_lo v_mbcnt_lo_u32_b32 v4, s9, 0 global_load_b64 v[2:3], v2, s[2:3] offset:16 v_cmpx_eq_u32_e32 0, v4 s_cbranch_execz .LBB0_14 s_bcnt1_i32_b32 s9, s9 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9 s_waitcnt vmcnt(0) global_atomic_add_u64 v[2:3], v[4:5], off offset:8 .LBB0_14: s_or_b32 exec_lo, exec_lo, s8 s_waitcnt vmcnt(0) global_load_b64 v[4:5], v[2:3], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] s_cbranch_vccnz .LBB0_16 global_load_b32 v2, v[2:3], off offset:24 v_mov_b32_e32 v3, 0 s_waitcnt 
vmcnt(0) v_readfirstlane_b32 s8, v2 s_waitcnt_vscnt null, 0x0 global_store_b64 v[4:5], v[2:3], off s_and_b32 m0, s8, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_16: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s7, 24 s_mul_hi_u32 s7, s6, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s7, s7, s1 s_mul_i32 s1, s6, 24 v_add_co_u32 v0, vcc_lo, v0, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_20 .p2align 6 .LBB0_17: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_19 s_sleep 1 s_cbranch_execnz .LBB0_20 s_branch .LBB0_22 .p2align 6 .LBB0_19: s_branch .LBB0_22 .LBB0_20: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_17 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_17 .LBB0_22: global_load_b64 v[22:23], v[6:7], off s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_26 v_mov_b32_e32 v6, 0 s_clause 0x2 global_load_b64 v[2:3], v6, s[2:3] offset:40 global_load_b64 v[7:8], v6, s[2:3] offset:24 glc global_load_b64 v[4:5], v6, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v9, vcc_lo, v2, 1 v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v9, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9 v_and_b32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v2, v0, v2 v_mul_lo_u32 v3, v3, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) 
v_mul_hi_u32 v9, v2, 24 v_mul_lo_u32 v2, v2, 24 v_add_nc_u32_e32 v3, v9, v3 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v4, vcc_lo, v4, v2 v_mov_b32_e32 v2, v7 v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo v_mov_b32_e32 v3, v8 global_store_b64 v[4:5], v[7:8], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_26 s_mov_b32 s0, 0 .LBB0_25: s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_25 .LBB0_26: s_or_b32 exec_lo, exec_lo, s1 s_getpc_b64 s[4:5] s_add_u32 s4, s4, .str@rel32@lo+4 s_addc_u32 s5, s5, .str@rel32@hi+12 s_mov_b32 s0, -1 s_cmp_lg_u64 s[4:5], 0 s_cbranch_scc0 .LBB0_105 s_waitcnt vmcnt(0) v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22 v_mov_b32_e32 v25, 0 s_mov_b64 s[6:7], 6 s_branch .LBB0_29 .LBB0_28: s_or_b32 exec_lo, exec_lo, s1 s_sub_u32 s6, s6, s8 s_subb_u32 s7, s7, s9 s_add_u32 s4, s4, s8 s_addc_u32 s5, s5, s9 s_cmp_lg_u64 s[6:7], 0 s_cbranch_scc0 .LBB0_104 .LBB0_29: v_cmp_lt_u64_e64 s0, s[6:7], 56 s_delay_alu instid0(VALU_DEP_1) s_and_b32 s0, s0, exec_lo s_cselect_b32 s8, s6, 56 s_cselect_b32 s9, s7, 0 s_cmp_gt_u32 s8, 7 s_mov_b32 s0, -1 s_cbranch_scc1 .LBB0_34 v_mov_b32_e32 v2, 0 v_mov_b32_e32 v3, 0 s_cmp_eq_u32 s8, 0 s_cbranch_scc1 .LBB0_33 s_lshl_b64 s[0:1], s[8:9], 3 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], s[4:5] .LBB0_32: global_load_u8 v4, v25, s[12:13] s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v4 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b64 v[4:5], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, 
s11, 0 s_add_u32 s12, s12, 1 s_addc_u32 s13, s13, 0 s_cmp_lg_u32 s0, s10 v_or_b32_e32 v2, v4, v2 v_or_b32_e32 v3, v5, v3 s_cbranch_scc1 .LBB0_32 .LBB0_33: s_mov_b32 s0, 0 s_mov_b32 s15, 0 .LBB0_34: s_and_not1_b32 vcc_lo, exec_lo, s0 s_mov_b64 s[0:1], s[4:5] s_cbranch_vccnz .LBB0_36 global_load_b64 v[2:3], v25, s[4:5] s_add_i32 s15, s8, -8 s_add_u32 s0, s4, 8 s_addc_u32 s1, s5, 0 .LBB0_36: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_41 v_mov_b32_e32 v4, 0 v_mov_b32_e32 v5, 0 s_cmp_eq_u32 s15, 0 s_cbranch_scc1 .LBB0_40 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_39: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v6, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v4, v6, v4 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v5, v7, v5 s_cbranch_scc1 .LBB0_39 .LBB0_40: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_42 s_branch .LBB0_43 .LBB0_41: .LBB0_42: global_load_b64 v[4:5], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_43: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_48 v_mov_b32_e32 v6, 0 v_mov_b32_e32 v7, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_47 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_46: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v8, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[8:9], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s14, s12 v_or_b32_e32 v6, v8, v6 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v7, v9, v7 s_cbranch_scc1 .LBB0_46 .LBB0_47: s_mov_b32 s15, 0 s_cbranch_execz .LBB0_49 s_branch 
.LBB0_50 .LBB0_48: .LBB0_49: global_load_b64 v[6:7], v25, s[0:1] s_add_i32 s15, s14, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_50: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_55 v_mov_b32_e32 v8, 0 v_mov_b32_e32 v9, 0 s_cmp_eq_u32 s15, 0 s_cbranch_scc1 .LBB0_54 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_53: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v10, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[10:11], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v8, v10, v8 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v9, v11, v9 s_cbranch_scc1 .LBB0_53 .LBB0_54: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_56 s_branch .LBB0_57 .LBB0_55: .LBB0_56: global_load_b64 v[8:9], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_57: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_62 v_mov_b32_e32 v10, 0 v_mov_b32_e32 v11, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_61 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_60: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v12, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[12:13], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s14, s12 v_or_b32_e32 v10, v12, v10 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v11, v13, v11 s_cbranch_scc1 .LBB0_60 .LBB0_61: s_mov_b32 s15, 0 s_cbranch_execz .LBB0_63 s_branch .LBB0_64 .LBB0_62: .LBB0_63: global_load_b64 v[10:11], v25, s[0:1] s_add_i32 s15, s14, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_64: s_cmp_gt_u32 s15, 7 s_cbranch_scc1 .LBB0_69 v_mov_b32_e32 v12, 0 v_mov_b32_e32 v13, 0 s_cmp_eq_u32 
s15, 0 s_cbranch_scc1 .LBB0_68 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], 0 .LBB0_67: s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s16, s0, s12 s_addc_u32 s17, s1, s13 s_add_u32 s12, s12, 1 global_load_u8 v14, v25, s[16:17] s_addc_u32 s13, s13, 0 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_lshlrev_b64 v[14:15], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_cmp_lg_u32 s15, s12 v_or_b32_e32 v12, v14, v12 s_delay_alu instid0(VALU_DEP_2) v_or_b32_e32 v13, v15, v13 s_cbranch_scc1 .LBB0_67 .LBB0_68: s_mov_b32 s14, 0 s_cbranch_execz .LBB0_70 s_branch .LBB0_71 .LBB0_69: .LBB0_70: global_load_b64 v[12:13], v25, s[0:1] s_add_i32 s14, s15, -8 s_add_u32 s0, s0, 8 s_addc_u32 s1, s1, 0 .LBB0_71: s_cmp_gt_u32 s14, 7 s_cbranch_scc1 .LBB0_76 v_mov_b32_e32 v14, 0 v_mov_b32_e32 v15, 0 s_cmp_eq_u32 s14, 0 s_cbranch_scc1 .LBB0_75 s_mov_b64 s[10:11], 0 s_mov_b64 s[12:13], s[0:1] .LBB0_74: global_load_u8 v16, v25, s[12:13] s_add_i32 s14, s14, -1 s_waitcnt vmcnt(0) v_and_b32_e32 v24, 0xffff, v16 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b64 v[16:17], s10, v[24:25] s_add_u32 s10, s10, 8 s_addc_u32 s11, s11, 0 s_add_u32 s12, s12, 1 s_addc_u32 s13, s13, 0 s_cmp_lg_u32 s14, 0 v_or_b32_e32 v14, v16, v14 v_or_b32_e32 v15, v17, v15 s_cbranch_scc1 .LBB0_74 .LBB0_75: s_cbranch_execz .LBB0_77 s_branch .LBB0_78 .LBB0_76: .LBB0_77: global_load_b64 v[14:15], v25, s[0:1] .LBB0_78: v_mov_b32_e32 v24, v20 v_mov_b32_e32 v26, 0 v_mov_b32_e32 v27, 0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s0, v24 v_cmp_eq_u32_e64 s0, s0, v24 s_delay_alu instid0(VALU_DEP_1) s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_84 global_load_b64 v[18:19], v25, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[16:17], v25, s[2:3] offset:40 global_load_b64 v[26:27], v25, s[2:3] s_mov_b32 s10, exec_lo s_waitcnt vmcnt(1) v_and_b32_e32 v17, 
v17, v19 v_and_b32_e32 v16, v16, v18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v17, v17, 24 v_mul_hi_u32 v21, v16, 24 v_mul_lo_u32 v16, v16, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v17, v21, v17 s_waitcnt vmcnt(0) v_add_co_u32 v16, vcc_lo, v26, v16 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo global_load_b64 v[16:17], v[16:17], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[26:27], v[18:19] s_cbranch_execz .LBB0_83 s_mov_b32 s11, 0 .p2align 6 .LBB0_81: s_sleep 1 s_clause 0x1 global_load_b64 v[16:17], v25, s[2:3] offset:40 global_load_b64 v[28:29], v25, s[2:3] v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_and_b32_e32 v16, v16, v18 s_waitcnt vmcnt(0) v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19 v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17] global_load_b64 v[16:17], v[26:27], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19] s_or_b32 s11, vcc_lo, s11 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s11 s_cbranch_execnz .LBB0_81 s_or_b32 exec_lo, exec_lo, s11 .LBB0_83: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s10 .LBB0_84: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 s_clause 0x1 global_load_b64 v[28:29], v25, s[2:3] offset:40 global_load_b128 v[16:19], v25, s[2:3] v_readfirstlane_b32 s10, v26 v_readfirstlane_b32 s11, v27 s_mov_b32 s14, exec_lo s_waitcnt vmcnt(1) v_readfirstlane_b32 s12, v28 
v_readfirstlane_b32 s13, v29 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[12:13], s[10:11], s[12:13] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_86 v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0 s_mul_i32 s14, s13, 24 s_mul_hi_u32 s15, s12, 24 v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1 s_add_i32 s15, s15, s14 s_mul_i32 s14, s12, 24 s_waitcnt vmcnt(0) v_add_co_u32 v30, vcc_lo, v16, s14 v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo global_store_b128 v[30:31], v[26:29], off offset:8 .LBB0_86: s_or_b32 exec_lo, exec_lo, s1 v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56 v_or_b32_e32 v21, 2, v0 s_lshl_b64 s[14:15], s[12:13], 12 v_lshlrev_b64 v[26:27], 6, v[24:25] s_lshl_b32 s1, s8, 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_i32 s1, s1, 28 v_cndmask_b32_e32 v0, v21, v0, vcc_lo s_waitcnt vmcnt(0) v_add_co_u32 v18, vcc_lo, v18, s14 v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo s_and_b32 s1, s1, 0x1e0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v18, vcc_lo, v18, v26 v_and_or_b32 v0, v0, 0xffffff1f, s1 v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo s_clause 0x3 global_store_b128 v[18:19], v[0:3], off global_store_b128 v[18:19], v[4:7], off offset:16 global_store_b128 v[18:19], v[8:11], off offset:32 global_store_b128 v[18:19], v[12:15], off offset:48 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_94 s_clause 0x1 global_load_b64 v[8:9], v25, s[2:3] offset:32 glc global_load_b64 v[0:1], v25, s[2:3] offset:40 v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 s_waitcnt vmcnt(0) v_readfirstlane_b32 s14, v0 v_readfirstlane_b32 s15, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[14:15], s[14:15], s[10:11] s_mul_i32 s15, s15, 24 s_mul_hi_u32 s16, s14, 24 s_mul_i32 s14, s14, 24 s_add_i32 s16, s16, s15 v_add_co_u32 v4, vcc_lo, v16, s14 v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo s_mov_b32 s14, exec_lo global_store_b64 v[4:5], v[8:9], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 
v[2:3], v25, v[6:9], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[2:3], v[8:9] s_cbranch_execz .LBB0_90 s_mov_b32 s15, 0 .LBB0_89: v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11 s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3] v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 s_or_b32 s15, vcc_lo, s15 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s15 s_cbranch_execnz .LBB0_89 .LBB0_90: s_or_b32 exec_lo, exec_lo, s14 global_load_b64 v[0:1], v25, s[2:3] offset:16 s_mov_b32 s15, exec_lo s_mov_b32 s14, exec_lo v_mbcnt_lo_u32_b32 v2, s15, 0 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v2 s_cbranch_execz .LBB0_92 s_bcnt1_i32_b32 s15, s15 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15 s_waitcnt vmcnt(0) global_atomic_add_u64 v[0:1], v[2:3], off offset:8 .LBB0_92: s_or_b32 exec_lo, exec_lo, s14 s_waitcnt vmcnt(0) global_load_b64 v[2:3], v[0:1], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3] s_cbranch_vccnz .LBB0_94 global_load_b32 v24, v[0:1], off offset:24 s_waitcnt vmcnt(0) v_readfirstlane_b32 s14, v24 s_waitcnt_vscnt null, 0x0 global_store_b64 v[2:3], v[24:25], off s_and_b32 m0, s14, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_94: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s13, 24 s_mul_hi_u32 s13, s12, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s13, s13, s1 s_mul_i32 s1, s12, 24 v_add_co_u32 v0, vcc_lo, v16, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_98 .p2align 6 .LBB0_95: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 
s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_97 s_sleep 1 s_cbranch_execnz .LBB0_98 s_branch .LBB0_100 .p2align 6 .LBB0_97: s_branch .LBB0_100 .LBB0_98: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_95 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_95 .LBB0_100: global_load_b64 v[0:1], v[18:19], off s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_28 s_clause 0x2 global_load_b64 v[4:5], v25, s[2:3] offset:40 global_load_b64 v[8:9], v25, s[2:3] offset:24 glc global_load_b64 v[6:7], v25, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v10, vcc_lo, v4, 1 v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, v10, s10 v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3] v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10 v_and_b32_e32 v5, v3, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_and_b32_e32 v4, v2, v4 v_mul_hi_u32 v10, v4, 24 v_mul_lo_u32 v4, v4, 24 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_u32 v6, vcc_lo, v6, v4 v_mov_b32_e32 v4, v8 v_mul_lo_u32 v5, v5, 24 v_add_nc_u32_e32 v5, v10, v5 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo v_mov_b32_e32 v5, v9 global_store_b64 v[6:7], v[8:9], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_28 s_mov_b32 s0, 0 .LBB0_103: s_sleep 1 global_store_b64 v[6:7], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5] v_dual_mov_b32 v4, v8 :: 
v_dual_mov_b32 v5, v9 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_103 s_branch .LBB0_28 .LBB0_104: s_mov_b32 s0, 0 .LBB0_105: s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 vcc_lo, exec_lo, s0 s_cbranch_vccz .LBB0_132 v_readfirstlane_b32 s0, v20 v_mov_b32_e32 v4, 0 v_mov_b32_e32 v5, 0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_u32_e64 s0, s0, v20 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_112 s_waitcnt vmcnt(0) v_mov_b32_e32 v0, 0 s_mov_b32 s4, exec_lo global_load_b64 v[6:7], v0, s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[3:4], v0, s[2:3] s_waitcnt vmcnt(1) v_and_b32_e32 v1, v1, v6 v_and_b32_e32 v2, v2, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v5, v1, 24 v_mul_lo_u32 v2, v2, 24 v_mul_lo_u32 v1, v1, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v2, v5, v2 s_waitcnt vmcnt(0) v_add_co_u32 v1, vcc_lo, v3, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo global_load_b64 v[4:5], v[1:2], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmpx_ne_u64_e64 v[4:5], v[6:7] s_cbranch_execz .LBB0_111 s_mov_b32 s5, 0 .p2align 6 .LBB0_109: s_sleep 1 s_clause 0x1 global_load_b64 v[1:2], v0, s[2:3] offset:40 global_load_b64 v[8:9], v0, s[2:3] v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_and_b32_e32 v1, v1, v6 s_waitcnt vmcnt(0) v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7 v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2] global_load_b64 v[4:5], 
v[3:4], off glc s_waitcnt vmcnt(0) global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7] s_or_b32 s5, vcc_lo, s5 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s5 s_cbranch_execnz .LBB0_109 s_or_b32 exec_lo, exec_lo, s5 .LBB0_111: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s4 .LBB0_112: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 v_mov_b32_e32 v21, 0 v_readfirstlane_b32 s4, v4 v_readfirstlane_b32 s5, v5 s_mov_b32 s8, exec_lo s_clause 0x1 global_load_b64 v[6:7], v21, s[2:3] offset:40 global_load_b128 v[0:3], v21, s[2:3] s_waitcnt vmcnt(1) v_readfirstlane_b32 s6, v6 v_readfirstlane_b32 s7, v7 s_delay_alu instid0(VALU_DEP_1) s_and_b64 s[6:7], s[4:5], s[6:7] s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_114 v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0 s_mul_i32 s8, s7, 24 s_mul_hi_u32 s9, s6, 24 v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1 s_add_i32 s9, s9, s8 s_mul_i32 s8, s6, 24 s_waitcnt vmcnt(0) v_add_co_u32 v8, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo global_store_b128 v[8:9], v[4:7], off offset:8 .LBB0_114: s_or_b32 exec_lo, exec_lo, s1 s_lshl_b64 s[8:9], s[6:7], 12 v_and_or_b32 v22, v22, 0xffffff1d, 34 s_waitcnt vmcnt(0) v_add_co_u32 v4, vcc_lo, v2, s8 v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo v_lshlrev_b64 v[2:3], 6, v[20:21] s_mov_b32 s8, 0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_mov_b32 s9, s8 s_mov_b32 s10, s8 s_mov_b32 s11, s8 v_add_co_u32 v8, vcc_lo, v4, v2 v_mov_b32_e32 v6, 0 v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11 v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10 s_delay_alu instid0(VALU_DEP_4) v_mov_b32_e32 v7, v6 s_clause 0x4 global_store_b64 v[8:9], v[22:23], off global_store_b128 v[8:9], v[2:5], off offset:8 global_store_b128 v[8:9], v[2:5], off offset:24 global_store_b128 
v[8:9], v[2:5], off offset:40 global_store_b64 v[8:9], v[6:7], off offset:56 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_122 v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4 v_mov_b32_e32 v10, s5 s_clause 0x1 global_load_b64 v[11:12], v8, s[2:3] offset:32 glc global_load_b64 v[2:3], v8, s[2:3] offset:40 s_waitcnt vmcnt(0) v_readfirstlane_b32 s8, v2 v_readfirstlane_b32 s9, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b64 s[8:9], s[8:9], s[4:5] s_mul_i32 s9, s9, 24 s_mul_hi_u32 s10, s8, 24 s_mul_i32 s8, s8, 24 s_add_i32 s10, s10, s9 v_add_co_u32 v6, vcc_lo, v0, s8 v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo s_mov_b32 s8, exec_lo global_store_b64 v[6:7], v[11:12], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmpx_ne_u64_e64 v[4:5], v[11:12] s_cbranch_execz .LBB0_118 s_mov_b32 s9, 0 .LBB0_117: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 s_sleep 1 global_store_b64 v[6:7], v[4:5], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_or_b32 s9, vcc_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s9 s_cbranch_execnz .LBB0_117 .LBB0_118: s_or_b32 exec_lo, exec_lo, s8 v_mov_b32_e32 v2, 0 s_mov_b32 s9, exec_lo s_mov_b32 s8, exec_lo v_mbcnt_lo_u32_b32 v4, s9, 0 global_load_b64 v[2:3], v2, s[2:3] offset:16 v_cmpx_eq_u32_e32 0, v4 s_cbranch_execz .LBB0_120 s_bcnt1_i32_b32 s9, s9 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9 s_waitcnt vmcnt(0) global_atomic_add_u64 v[2:3], v[4:5], off offset:8 .LBB0_120: s_or_b32 exec_lo, exec_lo, s8 s_waitcnt vmcnt(0) global_load_b64 v[4:5], v[2:3], off offset:16 s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5] s_cbranch_vccnz .LBB0_122 global_load_b32 v2, v[2:3], off offset:24 v_mov_b32_e32 v3, 0 s_waitcnt vmcnt(0) 
v_readfirstlane_b32 s8, v2 s_waitcnt_vscnt null, 0x0 global_store_b64 v[4:5], v[2:3], off s_and_b32 m0, s8, 0xff s_sendmsg sendmsg(MSG_INTERRUPT) .LBB0_122: s_or_b32 exec_lo, exec_lo, s1 s_mul_i32 s1, s7, 24 s_mul_hi_u32 s7, s6, 24 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) s_add_i32 s7, s7, s1 s_mul_i32 s1, s6, 24 v_add_co_u32 v0, vcc_lo, v0, s1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v0, 20 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_branch .LBB0_126 .p2align 6 .LBB0_123: s_or_b32 exec_lo, exec_lo, s1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s1, v2 s_cmp_eq_u32 s1, 0 s_cbranch_scc1 .LBB0_125 s_sleep 1 s_cbranch_execnz .LBB0_126 s_branch .LBB0_128 .p2align 6 .LBB0_125: s_branch .LBB0_128 .LBB0_126: v_mov_b32_e32 v2, 1 s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_123 global_load_b32 v2, v[0:1], off glc s_waitcnt vmcnt(0) buffer_gl1_inv buffer_gl0_inv v_and_b32_e32 v2, 1, v2 s_branch .LBB0_123 .LBB0_128: s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_132 v_mov_b32_e32 v6, 0 s_clause 0x2 global_load_b64 v[2:3], v6, s[2:3] offset:40 global_load_b64 v[7:8], v6, s[2:3] offset:24 glc global_load_b64 v[4:5], v6, s[2:3] s_waitcnt vmcnt(2) v_add_co_u32 v9, vcc_lo, v2, 1 v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v9, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9 v_and_b32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_and_b32_e32 v2, v0, v2 v_mul_lo_u32 v3, v3, 24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_hi_u32 v9, v2, 24 v_mul_lo_u32 
v2, v2, 24 v_add_nc_u32_e32 v3, v9, v3 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v4, vcc_lo, v4, v2 v_mov_b32_e32 v2, v7 v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo v_mov_b32_e32 v3, v8 global_store_b64 v[4:5], v[7:8], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_132 s_mov_b32 s0, 0 .LBB0_131: s_sleep 1 global_store_b64 v[4:5], v[2:3], off s_waitcnt_vscnt null, 0x0 global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc s_waitcnt vmcnt(0) v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execnz .LBB0_131 .LBB0_132: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9fooKernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 256 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 32 .amdhsa_next_free_sgpr 18 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9fooKernelv, .Lfunc_end0-_Z9fooKernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type .str,@object .section .rodata.str1.1,"aMS",@progbits,1 .str: .asciz "foo!\n" .size .str, 6 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: hidden_block_count_x - .offset: 4 .size: 4 .value_kind: hidden_block_count_y - .offset: 8 .size: 4 .value_kind: hidden_block_count_z - .offset: 12 .size: 2 .value_kind: hidden_group_size_x - .offset: 14 .size: 2 .value_kind: hidden_group_size_y - .offset: 16 .size: 2 .value_kind: hidden_group_size_z - .offset: 18 .size: 2 .value_kind: hidden_remainder_x - .offset: 20 .size: 2 .value_kind: hidden_remainder_y - .offset: 22 .size: 2 .value_kind: hidden_remainder_z - .offset: 40 .size: 8 .value_kind: hidden_global_offset_x - .offset: 48 .size: 8 .value_kind: hidden_global_offset_y - .offset: 56 .size: 8 .value_kind: hidden_global_offset_z - .offset: 64 .size: 2 .value_kind: hidden_grid_dims - .offset: 80 .size: 8 .value_kind: hidden_hostcall_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 256 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9fooKernelv .private_segment_fixed_size: 0 .sgpr_count: 20 .sgpr_spill_count: 0 .symbol: _Z9fooKernelv.kd .uniform_work_group_size: 1 
.uses_dynamic_stack: false .vgpr_count: 32 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000840ed_00000000-6_foo.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z9fooKernelvv .type _Z27__device_stub__Z9fooKernelvv, @function _Z27__device_stub__Z9fooKernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z9fooKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z27__device_stub__Z9fooKernelvv, .-_Z27__device_stub__Z9fooKernelvv .globl _Z9fooKernelv .type _Z9fooKernelv, @function _Z9fooKernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z9fooKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z9fooKernelv, .-_Z9fooKernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "foo done!\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 call _Z3barv@PLT movl $1, 
20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z27__device_stub__Z9fooKernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z9fooKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z9fooKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "foo.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z24__device_stub__fooKernelv # -- Begin function _Z24__device_stub__fooKernelv .p2align 4, 0x90 .type _Z24__device_stub__fooKernelv,@function _Z24__device_stub__fooKernelv: # @_Z24__device_stub__fooKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9fooKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z24__device_stub__fooKernelv, .Lfunc_end0-_Z24__device_stub__fooKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 callq _Z3barv movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9fooKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize movl $_ZSt4cout, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9fooKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9fooKernelv,@object # @_Z9fooKernelv .section .rodata,"a",@progbits .globl _Z9fooKernelv .p2align 3, 0x0 _Z9fooKernelv: .quad _Z24__device_stub__fooKernelv .size _Z9fooKernelv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "foo done!\n" .size .L.str, 11 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9fooKernelv" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section 
.init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__fooKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9fooKernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; cudaMalloc((void**)&d_input, num_elements * sizeof(double) ); cudaMemcpy(d_input, h_input.data(), num_elements * sizeof(double), cudaMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; cudaMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; cudaMalloc((void**)&d_result, 1 * sizeof(double)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); cudaEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums cudaMemcpy(&device_result, d_result, 1 * sizeof(double), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float timeinms = 0.0; cudaEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory cudaFree(d_input); cudaFree(d_partial_sums_and_total); cudaFree(d_result); return 0; }
code for sm_80 Function : _Z9block_sumPKdPdm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R2, R8, 0x200, R9 ; /* 0x0000020008027824 */ /* 0x001fca00078e0209 */ /*0050*/ IADD3 R0, R2, 0x100, RZ ; /* 0x0000010002007810 */ /* 0x000fc80007ffe0ff */ /*0060*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe40003f06070 */ /*0070*/ SHF.R.S32.HI R0, RZ, 0x1f, R0 ; /* 0x0000001fff007819 */ /* 0x000fc80000011400 */ /*0080*/ ISETP.GE.U32.AND.EX P0, PT, R0, c[0x0][0x174], PT, P0 ; /* 0x00005d0000007a0c */ /* 0x000fda0003f06100 */ /*0090*/ @P0 BRA 0x230 ; /* 0x0000019000000947 */ /* 0x000fea0003800000 */ /*00a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fc800078e00ff */ /*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0203 */ /*00c0*/ LDG.E.64 R4, [R2.64+0x800] ; /* 0x0008000402047981 */ /* 0x000ea8000c1e1b00 */ /*00d0*/ LDG.E.64 R6, [R2.64] ; /* 0x0000000402067981 */ /* 0x000ea2000c1e1b00 */ /*00e0*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */ /* 0x000fe20000000f00 */ /*00f0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe60003800000 */ /*0100*/ ISETP.GE.AND P0, PT, R0, 0x2, PT ; /* 0x000000020000780c */ /* 0x000fe20003f06270 */ /*0110*/ DADD R4, R4, R6 ; /* 0x0000000004047229 */ /* 0x004e0e0000000006 */ /*0120*/ STS.64 [R9.X8], R4 ; /* 0x0000000409007388 */ /* 0x0011e80000008a00 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0140*/ @!P0 BRA 0x230 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0150*/ 
IMAD.SHL.U32 R0, R9, 0x8, RZ ; /* 0x0000000809007824 */ /* 0x000fe400078e00ff */ /*0160*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */ /* 0x000fca00078e00ff */ /*0170*/ SHF.R.U32.HI R7, RZ, 0x1, R6 ; /* 0x00000001ff077819 */ /* 0x000fe20000011606 */ /*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe60003800000 */ /*0190*/ ISETP.GE.U32.AND P0, PT, R9, R7, PT ; /* 0x000000070900720c */ /* 0x000fda0003f06070 */ /*01a0*/ @!P0 LEA R4, R7, R0, 0x3 ; /* 0x0000000007048211 */ /* 0x001fe200078e18ff */ /*01b0*/ @!P0 LDS.64 R2, [R9.X8] ; /* 0x0000000009028984 */ /* 0x000fea0000008a00 */ /*01c0*/ @!P0 LDS.64 R4, [R4] ; /* 0x0000000004048984 */ /* 0x000e240000000a00 */ /*01d0*/ @!P0 DADD R2, R2, R4 ; /* 0x0000000002028229 */ /* 0x001e0e0000000004 */ /*01e0*/ @!P0 STS.64 [R9.X8], R2 ; /* 0x0000000209008388 */ /* 0x0011e80000008a00 */ /*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0200*/ ISETP.GT.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */ /* 0x000fe20003f04070 */ /*0210*/ IMAD.MOV.U32 R6, RZ, RZ, R7 ; /* 0x000000ffff067224 */ /* 0x000fd800078e0007 */ /*0220*/ @P0 BRA 0x170 ; /* 0xffffff4000000947 */ /* 0x001fea000383ffff */ /*0230*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0240*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0250*/ MOV R5, 0x8 ; /* 0x0000000800057802 */ /* 0x001fca0000000f00 */ /*0260*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x168] ; /* 0x00005a0008047625 */ /* 0x000fe200078e0005 */ /*0270*/ LDS.64 R2, [RZ] ; /* 0x00000000ff027984 */ /* 0x000e280000000a00 */ /*0280*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */ /* 0x001fe2000c101b04 */ /*0290*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; cudaMalloc((void**)&d_input, num_elements * sizeof(double) ); cudaMemcpy(d_input, h_input.data(), num_elements * sizeof(double), cudaMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; cudaMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; cudaMalloc((void**)&d_result, 1 * sizeof(double)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); cudaEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums cudaMemcpy(&device_result, d_result, 1 * sizeof(double), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float timeinms = 0.0; cudaEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory cudaFree(d_input); cudaFree(d_partial_sums_and_total); cudaFree(d_result); return 0; }
.file "tmpxft_00021dcb_00000000-6_reduction2.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4075: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4075: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z13random_doublev .type _Z13random_doublev, @function _Z13random_doublev: .LFB4071: .cfi_startproc endbr64 movsd .LC0(%rip), %xmm0 ret .cfi_endproc .LFE4071: .size _Z13random_doublev, .-_Z13random_doublev .globl _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .type _Z32__device_stub__Z9block_sumPKdPdmPKdPdm, @function _Z32__device_stub__Z9block_sumPKdPdmPKdPdm: .LFB4097: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L8 .L4: movq 120(%rsp), %rax subq %fs:40, %rax jne .L9 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L8: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9block_sumPKdPdm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L4 .L9: call __stack_chk_fail@PLT .cfi_endproc .LFE4097: .size _Z32__device_stub__Z9block_sumPKdPdmPKdPdm, .-_Z32__device_stub__Z9block_sumPKdPdmPKdPdm .globl 
_Z9block_sumPKdPdm .type _Z9block_sumPKdPdm, @function _Z9block_sumPKdPdm: .LFB4098: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4098: .size _Z9block_sumPKdPdm, .-_Z9block_sumPKdPdm .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z9block_sumPKdPdm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4100: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z9block_sumPKdPdm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4100: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .text._ZNSt6vectorIdSaIdEED2Ev,"axG",@progbits,_ZNSt6vectorIdSaIdEED5Ev,comdat .align 2 .weak _ZNSt6vectorIdSaIdEED2Ev .type _ZNSt6vectorIdSaIdEED2Ev, @function _ZNSt6vectorIdSaIdEED2Ev: .LFB4410: .cfi_startproc endbr64 movq (%rdi), %rax testq %rax, %rax je .L17 subq $8, %rsp .cfi_def_cfa_offset 16 movq 16(%rdi), %rsi subq %rax, %rsi movq %rax, %rdi call _ZdlPvm@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .L17: ret .cfi_endproc .LFE4410: .size _ZNSt6vectorIdSaIdEED2Ev, .-_ZNSt6vectorIdSaIdEED2Ev .weak _ZNSt6vectorIdSaIdEED1Ev .set _ZNSt6vectorIdSaIdEED1Ev,_ZNSt6vectorIdSaIdEED2Ev .section .rodata.str1.1 .LC4: .string "Host sum: " .LC5: .string "Device sum: " .LC6: 
.string "ElapsedTime:" .text .globl main .type main, @function main: .LFB4072: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4072 endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 addq $-128, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl $2097152, %edi .LEHB0: call _Znwm@PLT .LEHE0: movq %rax, %rbx movq %rax, 96(%rsp) leaq 2097152(%rax), %rdx movq %rdx, 112(%rsp) movq $0x000000000, (%rax) leaq 8(%rax), %rax .L21: movq $0x000000000, (%rax) addq $8, %rax cmpq %rdx, %rax jne .L21 movq %rdx, 104(%rsp) movq %rbx, %rax leaq 2097152(%rbx), %rcx movq %rbx, %rdx movsd .LC0(%rip), %xmm0 .L22: movsd %xmm0, (%rdx) addq $8, %rdx cmpq %rcx, %rdx jne .L22 pxor %xmm0, %xmm0 .L23: cvtss2sd %xmm0, %xmm0 addsd (%rax), %xmm0 cvtsd2ss %xmm0, %xmm0 addq $8, %rax cmpq %rcx, %rax jne .L23 pxor %xmm1, %xmm1 cvtss2sd %xmm0, %xmm1 movsd %xmm1, 8(%rsp) leaq .LC4(%rip), %rsi leaq _ZSt4cerr(%rip), %rdi .LEHB1: call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movsd 8(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq $0, 24(%rsp) leaq 24(%rsp), %rdi movl $2097152, %esi call cudaMalloc@PLT movl $1, %ecx movl $2097152, %edx movq %rbx, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movq $0, 32(%rsp) leaq 32(%rsp), %rdi movl $4096, %esi call cudaMalloc@PLT movq $0, 40(%rsp) movq $0x000000000, 48(%rsp) leaq 40(%rsp), %rdi movl $8, %esi call cudaMalloc@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movl $256, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $512, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call 
__cudaPushCallConfiguration@PLT testl %eax, %eax jne .L24 movl $262144, %edx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .L24: movl $256, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L25 movl $512, %edx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .L25: movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT leaq 48(%rsp), %rdi movl $2, %ecx movl $8, %edx movq 40(%rsp), %rsi call cudaMemcpy@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 84(%rsp) leaq 84(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movsd 48(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC6(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 84(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT .LEHE1: leaq 96(%rsp), %rdi call _ZNSt6vectorIdSaIdEED1Ev movq 120(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax subq $-128, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state endbr64 movq %rax, %rbx leaq 96(%rsp), %rdi call _ZNSt6vectorIdSaIdEED1Ev movq 120(%rsp), %rax subq %fs:40, %rax je .L27 call __stack_chk_fail@PLT .L27: movq %rbx, %rdi .LEHB2: call _Unwind_Resume@PLT .LEHE2: .L35: call 
__stack_chk_fail@PLT .cfi_endproc .LFE4072: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4072: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4072-.LLSDACSB4072 .LLSDACSB4072: .uleb128 .LEHB0-.LFB4072 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4072 .uleb128 .LEHE1-.LEHB1 .uleb128 .L29-.LFB4072 .uleb128 0 .uleb128 .LEHB2-.LFB4072 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .LLSDACSE4072: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1072693248 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; cudaMalloc((void**)&d_input, num_elements * sizeof(double) ); cudaMemcpy(d_input, h_input.data(), num_elements * sizeof(double), cudaMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; cudaMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; cudaMalloc((void**)&d_result, 1 * sizeof(double)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); cudaEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums cudaMemcpy(&device_result, d_result, 1 * sizeof(double), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float timeinms = 0.0; cudaEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory cudaFree(d_input); cudaFree(d_partial_sums_and_total); cudaFree(d_result); return 0; }
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; hipMalloc((void**)&d_input, num_elements * sizeof(double) ); hipMemcpy(d_input, h_input.data(), num_elements * sizeof(double), hipMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; hipMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; hipMalloc((void**)&d_result, 1 * sizeof(double)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); hipEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums hipMemcpy(&device_result, d_result, 1 * sizeof(double), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float timeinms = 0.0; hipEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory hipFree(d_input); hipFree(d_partial_sums_and_total); hipFree(d_result); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; hipMalloc((void**)&d_input, num_elements * sizeof(double) ); hipMemcpy(d_input, h_input.data(), num_elements * sizeof(double), hipMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; hipMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; hipMalloc((void**)&d_result, 1 * sizeof(double)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); hipEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums hipMemcpy(&device_result, d_result, 1 * sizeof(double), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float timeinms = 0.0; hipEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory hipFree(d_input); hipFree(d_partial_sums_and_total); hipFree(d_result); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9block_sumPKdPdm .globl _Z9block_sumPKdPdm .p2align 8 .type _Z9block_sumPKdPdm,@function _Z9block_sumPKdPdm: s_load_b64 s[4:5], s[0:1], 0x10 s_mov_b32 s2, s15 s_mov_b32 s3, exec_lo v_lshl_add_u32 v3, s2, 9, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v1, 0x100, v3 v_ashrrev_i32_e32 v2, 31, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[4:5], v[1:2] s_cbranch_execz .LBB0_6 s_load_b64 s[4:5], s[0:1], 0x0 v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[1:2], 3, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[3:4] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo s_clause 0x1 global_load_b64 v[3:4], v[3:4], off global_load_b64 v[1:2], v[1:2], off s_waitcnt vmcnt(0) v_add_f64 v[2:3], v[3:4], v[1:2] v_lshlrev_b32_e32 v1, 3, v0 ds_store_b64 v1, v[2:3] s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_load_b32 s4, s[0:1], 0x24 s_waitcnt lgkmcnt(0) v_cmp_lt_u16_e64 s5, s4, 2 s_delay_alu instid0(VALU_DEP_1) s_and_b32 vcc_lo, exec_lo, s5 s_cbranch_vccnz .LBB0_6 s_and_b32 s4, 0xffff, s4 s_branch .LBB0_4 .p2align 6 .LBB0_3: s_or_b32 exec_lo, exec_lo, s6 s_cmp_gt_u32 s4, 3 s_mov_b32 s4, s5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_6 .LBB0_4: s_lshr_b32 s5, s4, 1 s_mov_b32 s6, exec_lo v_cmpx_gt_u32_e64 s5, v0 s_cbranch_execz .LBB0_3 v_add_lshl_u32 v2, s5, v0, 3 ds_load_b64 v[2:3], v2 ds_load_b64 v[4:5], v1 s_waitcnt lgkmcnt(0) v_add_f64 v[2:3], v[2:3], v[4:5] ds_store_b64 v1, v[2:3] s_branch .LBB0_3 .LBB0_6: s_or_b32 exec_lo, exec_lo, s3 v_mov_b32_e32 v2, 0 s_load_b64 s[0:1], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b64 v[0:1], v2 s_mov_b32 s3, 
0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 3 s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 s_waitcnt lgkmcnt(0) global_store_b64 v2, v[0:1], s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9block_sumPKdPdm .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9block_sumPKdPdm, .Lfunc_end0-_Z9block_sumPKdPdm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9block_sumPKdPdm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9block_sumPKdPdm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <vector> #include <numeric> #include <iostream> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 double random_double(void) { return 1.0; // return static_cast<double>(rand()) / RAND_MAX; // summing random doubles give numerical instability. how to do collaborative sorting? } // Part 1 of 6: implement the kernel __global__ void block_sum(const double *input, double *per_block_results, const size_t n) { //fill me __shared__ double sdata[256]; int i = 512 * blockIdx.x + threadIdx.x; if (i + 256 < n){ sdata[threadIdx.x] = input[i] + input[i + 256]; __syncthreads(); //atomicAdd(&per_block_results[blockIdx.x], sdata[threadIdx.x]); int totalThreads = blockDim.x; while(totalThreads >1){ totalThreads = (totalThreads >> 1); if (threadIdx.x < totalThreads){ sdata[threadIdx.x] += sdata[threadIdx.x + totalThreads]; } __syncthreads(); } } __syncthreads(); per_block_results[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(void) { const int blockDim = 256; // create array of 256ki elements const int num_elements = 1<<18; //const int num_elements = 512; srand(time(NULL)); // generate random input on the host std::vector<double> h_input(num_elements); for(int i = 0; i < h_input.size(); ++i) { h_input[i] = random_double(); } const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f); std::cerr << "Host sum: " << host_result << std::endl; //Part 1 of 6: move input to device memory double *d_input = 0; hipMalloc((void**)&d_input, num_elements * sizeof(double) ); hipMemcpy(d_input, h_input.data(), num_elements * sizeof(double), hipMemcpyHostToDevice); // Part 1 of 6: allocate the partial sums: How much space does it need? 
double *d_partial_sums_and_total = 0; hipMalloc((void**)&d_partial_sums_and_total, num_elements / blockDim / 2 * sizeof(double) ); // Part 1 of 6: copy the result back to the host double *d_result = 0; double device_result = 0; hipMalloc((void**)&d_result, 1 * sizeof(double)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need? block_sum<<<num_elements / blockDim / 2, blockDim>>>(d_input, d_partial_sums_and_total, num_elements); block_sum<<<1, blockDim>>>(d_partial_sums_and_total, d_result, num_elements / blockDim /2); hipEventRecord(stop); // Part 1 of 6: compute the sum of the partial sums hipMemcpy(&device_result, d_result, 1 * sizeof(double), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float timeinms = 0.0; hipEventElapsedTime(&timeinms, start, stop); std::cout << "Device sum: " << device_result << std::endl; std::cout << "ElapsedTime:" << timeinms << std::endl; // Part 1 of 6: deallocate device memory hipFree(d_input); hipFree(d_partial_sums_and_total); hipFree(d_result); return 0; }
.text .file "reduction2.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z13random_doublev .LCPI0_0: .quad 0x3ff0000000000000 # double 1 .text .globl _Z13random_doublev .p2align 4, 0x90 .type _Z13random_doublev,@function _Z13random_doublev: # @_Z13random_doublev .cfi_startproc # %bb.0: movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero retq .Lfunc_end0: .size _Z13random_doublev, .Lfunc_end0-_Z13random_doublev .cfi_endproc # -- End function .globl _Z24__device_stub__block_sumPKdPdm # -- Begin function _Z24__device_stub__block_sumPKdPdm .p2align 4, 0x90 .type _Z24__device_stub__block_sumPKdPdm,@function _Z24__device_stub__block_sumPKdPdm: # @_Z24__device_stub__block_sumPKdPdm .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z24__device_stub__block_sumPKdPdm, .Lfunc_end1-_Z24__device_stub__block_sumPKdPdm .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: # %_ZNSt6vectorIdSaIdEEC2EmRKS0_.exit pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $160, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -32 .cfi_offset 
%r14, -24 .cfi_offset %r15, -16 .cfi_escape 0x2e, 0x00 xorl %r14d, %r14d xorl %edi, %edi callq time .cfi_escape 0x2e, 0x00 movl %eax, %edi callq srand .cfi_escape 0x2e, 0x00 movl $2097152, %edi # imm = 0x200000 callq _Znwm movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $2097152, %edx # imm = 0x200000 movq %rax, %rdi xorl %esi, %esi callq memset@PLT movabsq $4607182418800017408, %rax # imm = 0x3FF0000000000000 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movq %rax, (%rbx,%r14,8) incq %r14 cmpq $262144, %r14 # imm = 0x40000 jne .LBB2_1 # %bb.2: # %.lr.ph.i.preheader xorps %xmm0, %xmm0 xorl %eax, %eax .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 cvtss2sd %xmm0, %xmm0 addsd (%rbx,%rax), %xmm0 cvtsd2ss %xmm0, %xmm0 addq $8, %rax cmpq $2097152, %rax # imm = 0x200000 jne .LBB2_3 # %bb.4: # %_ZSt10accumulateIN9__gnu_cxx17__normal_iteratorIPdSt6vectorIdSaIdEEEEfET0_T_S8_S7_.exit .Ltmp0: movss %xmm0, 108(%rsp) # 4-byte Spill .cfi_escape 0x2e, 0x00 movl $_ZSt4cerr, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp1: # %bb.5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit movss 108(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 .Ltmp2: .cfi_escape 0x2e, 0x00 movl $_ZSt4cerr, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp3: # %bb.6: # %_ZNSolsEd.exit movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_7 # %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r15) je .LBB2_11 # %bb.10: movzbl 67(%r15), %eax jmp .LBB2_13 .LBB2_11: .Ltmp4: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp5: # %bb.12: # %.noexc54 movq (%r15), %rax .Ltmp6: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp7: .LBB2_13: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i .Ltmp8: .cfi_escape 0x2e, 0x00 
movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp9: # %bb.14: # %.noexc56 .Ltmp10: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp11: # %bb.15: # %_ZNSolsEPFRSoS_E.exit movq $0, 16(%rsp) .Ltmp12: .cfi_escape 0x2e, 0x00 leaq 16(%rsp), %rdi movl $2097152, %esi # imm = 0x200000 callq hipMalloc .Ltmp13: # %bb.16: movq 16(%rsp), %rdi .Ltmp14: .cfi_escape 0x2e, 0x00 movl $2097152, %edx # imm = 0x200000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy .Ltmp15: # %bb.17: movq $0, 8(%rsp) .Ltmp17: .cfi_escape 0x2e, 0x00 leaq 8(%rsp), %rdi movl $4096, %esi # imm = 0x1000 callq hipMalloc .Ltmp18: # %bb.18: movq $0, (%rsp) movq $0, 152(%rsp) .Ltmp20: .cfi_escape 0x2e, 0x00 movq %rsp, %rdi movl $8, %esi callq hipMalloc .Ltmp21: # %bb.19: .Ltmp23: .cfi_escape 0x2e, 0x00 leaq 144(%rsp), %rdi callq hipEventCreate .Ltmp24: # %bb.20: .Ltmp25: .cfi_escape 0x2e, 0x00 leaq 24(%rsp), %rdi callq hipEventCreate .Ltmp26: # %bb.21: movq 144(%rsp), %rdi .Ltmp27: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp28: # %bb.22: .Ltmp29: .cfi_escape 0x2e, 0x00 movabsq $4294967808, %rdi # imm = 0x100000200 movabsq $4294967552, %rdx # imm = 0x100000100 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp30: # %bb.23: testl %eax, %eax jne .LBB2_26 # %bb.24: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) movq $262144, 80(%rsp) # imm = 0x40000 leaq 96(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) .Ltmp31: .cfi_escape 0x2e, 0x00 leaq 64(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp32: # %bb.25: # %.noexc movq 64(%rsp), %rsi movl 72(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp33: .cfi_escape 0x2e, 0x10 leaq 112(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp34: .LBB2_26: .Ltmp35: .cfi_escape 0x2e, 0x00 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $4294967552, %rdx # imm = 0x100000100 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp36: # %bb.27: testl %eax, %eax jne .LBB2_30 # %bb.28: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) movq $512, 80(%rsp) # imm = 0x200 leaq 96(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) .Ltmp37: .cfi_escape 0x2e, 0x00 leaq 64(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp38: # %bb.29: # %.noexc36 movq 64(%rsp), %rsi movl 72(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp39: .cfi_escape 0x2e, 0x10 leaq 112(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp40: .LBB2_30: movq 24(%rsp), %rdi .Ltmp41: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp42: # %bb.31: movq (%rsp), %rsi .Ltmp43: .cfi_escape 0x2e, 0x00 leaq 152(%rsp), %rdi movl $8, %edx movl $2, %ecx callq hipMemcpy .Ltmp44: # %bb.32: movq 24(%rsp), %rdi .Ltmp45: .cfi_escape 0x2e, 0x00 callq hipEventSynchronize .Ltmp46: # %bb.33: movl $0, 112(%rsp) movq 144(%rsp), %rsi movq 24(%rsp), %rdx .Ltmp48: .cfi_escape 0x2e, 0x00 leaq 112(%rsp), %rdi callq hipEventElapsedTime .Ltmp49: # %bb.34: .Ltmp50: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp51: # %bb.35: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit40 movsd 152(%rsp), %xmm0 # xmm0 = mem[0],zero .Ltmp52: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp53: # %bb.36: # 
%_ZNSolsEd.exit42 movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_51 # %bb.37: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i59 cmpb $0, 56(%r15) je .LBB2_44 # %bb.38: movzbl 67(%r15), %eax jmp .LBB2_46 .LBB2_44: .Ltmp54: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp55: # %bb.45: # %.noexc64 movq (%r15), %rax .Ltmp56: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp57: .LBB2_46: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i61 .Ltmp58: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp59: # %bb.47: # %.noexc66 .Ltmp60: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp61: # %bb.48: # %_ZNSolsEPFRSoS_E.exit44 .Ltmp62: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp63: # %bb.49: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit46 movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 .Ltmp64: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp65: # %bb.50: # %_ZNSolsEf.exit movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_51 # %bb.53: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i70 cmpb $0, 56(%r15) je .LBB2_55 # %bb.54: movzbl 67(%r15), %eax jmp .LBB2_57 .LBB2_55: .Ltmp66: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp67: # %bb.56: # %.noexc75 movq (%r15), %rax .Ltmp68: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp69: .LBB2_57: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i72 .Ltmp70: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp71: # %bb.58: # %.noexc77 .Ltmp72: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp73: # 
%bb.59: # %_ZNSolsEPFRSoS_E.exit49 movq 16(%rsp), %rdi .Ltmp74: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp75: # %bb.60: movq 8(%rsp), %rdi .Ltmp76: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp77: # %bb.61: movq (%rsp), %rdi .Ltmp78: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp79: # %bb.62: # %_ZNSt6vectorIdSaIdEED2Ev.exit .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdlPv xorl %eax, %eax addq $160, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB2_51: # %.invoke .cfi_def_cfa_offset 192 .Ltmp80: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp81: # %bb.52: # %.cont .LBB2_7: .Ltmp83: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp84: # %bb.8: # %.noexc53 .LBB2_42: .Ltmp22: jmp .LBB2_64 .LBB2_41: .Ltmp19: jmp .LBB2_64 .LBB2_40: .Ltmp16: jmp .LBB2_64 .LBB2_39: .Ltmp85: jmp .LBB2_64 .LBB2_43: .Ltmp47: jmp .LBB2_64 .LBB2_63: .Ltmp82: .LBB2_64: movq %rax, %r14 .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdlPv .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp11-.Ltmp0 # Call between .Ltmp0 and .Ltmp11 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp15-.Ltmp12 # Call between .Ltmp12 and .Ltmp15 .uleb128 .Ltmp16-.Lfunc_begin0 # jumps to .Ltmp16 .byte 0 # On action: cleanup .uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 4 
<< .uleb128 .Ltmp18-.Ltmp17 # Call between .Ltmp17 and .Ltmp18 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp20-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp21-.Ltmp20 # Call between .Ltmp20 and .Ltmp21 .uleb128 .Ltmp22-.Lfunc_begin0 # jumps to .Ltmp22 .byte 0 # On action: cleanup .uleb128 .Ltmp23-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp46-.Ltmp23 # Call between .Ltmp23 and .Ltmp46 .uleb128 .Ltmp47-.Lfunc_begin0 # jumps to .Ltmp47 .byte 0 # On action: cleanup .uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Ltmp81-.Ltmp48 # Call between .Ltmp48 and .Ltmp81 .uleb128 .Ltmp82-.Lfunc_begin0 # jumps to .Ltmp82 .byte 0 # On action: cleanup .uleb128 .Ltmp83-.Lfunc_begin0 # >> Call Site 8 << .uleb128 .Ltmp84-.Ltmp83 # Call between .Ltmp83 and .Ltmp84 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp84-.Lfunc_begin0 # >> Call Site 9 << .uleb128 .Lfunc_end2-.Ltmp84 # Call between .Ltmp84 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9block_sumPKdPdm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z9block_sumPKdPdm,@object # @_Z9block_sumPKdPdm .section .rodata,"a",@progbits .globl _Z9block_sumPKdPdm .p2align 3, 0x0 _Z9block_sumPKdPdm: .quad _Z24__device_stub__block_sumPKdPdm .size _Z9block_sumPKdPdm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Host sum: " .size .L.str, 11 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Device sum: " .size .L.str.1, 13 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "ElapsedTime:" .size .L.str.2, 13 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9block_sumPKdPdm" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__block_sumPKdPdm .addrsig_sym __gxx_personality_v0 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym 
_Z9block_sumPKdPdm .addrsig_sym _ZSt4cerr .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9block_sumPKdPdm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R2, R8, 0x200, R9 ; /* 0x0000020008027824 */ /* 0x001fca00078e0209 */ /*0050*/ IADD3 R0, R2, 0x100, RZ ; /* 0x0000010002007810 */ /* 0x000fc80007ffe0ff */ /*0060*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe40003f06070 */ /*0070*/ SHF.R.S32.HI R0, RZ, 0x1f, R0 ; /* 0x0000001fff007819 */ /* 0x000fc80000011400 */ /*0080*/ ISETP.GE.U32.AND.EX P0, PT, R0, c[0x0][0x174], PT, P0 ; /* 0x00005d0000007a0c */ /* 0x000fda0003f06100 */ /*0090*/ @P0 BRA 0x230 ; /* 0x0000019000000947 */ /* 0x000fea0003800000 */ /*00a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fc800078e00ff */ /*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0203 */ /*00c0*/ LDG.E.64 R4, [R2.64+0x800] ; /* 0x0008000402047981 */ /* 0x000ea8000c1e1b00 */ /*00d0*/ LDG.E.64 R6, [R2.64] ; /* 0x0000000402067981 */ /* 0x000ea2000c1e1b00 */ /*00e0*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */ /* 0x000fe20000000f00 */ /*00f0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe60003800000 */ /*0100*/ ISETP.GE.AND P0, PT, R0, 0x2, PT ; /* 0x000000020000780c */ /* 0x000fe20003f06270 */ /*0110*/ DADD R4, R4, R6 ; /* 0x0000000004047229 */ /* 0x004e0e0000000006 */ /*0120*/ STS.64 [R9.X8], R4 ; /* 0x0000000409007388 */ /* 0x0011e80000008a00 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0140*/ @!P0 BRA 0x230 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0150*/ 
IMAD.SHL.U32 R0, R9, 0x8, RZ ; /* 0x0000000809007824 */ /* 0x000fe400078e00ff */ /*0160*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */ /* 0x000fca00078e00ff */ /*0170*/ SHF.R.U32.HI R7, RZ, 0x1, R6 ; /* 0x00000001ff077819 */ /* 0x000fe20000011606 */ /*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe60003800000 */ /*0190*/ ISETP.GE.U32.AND P0, PT, R9, R7, PT ; /* 0x000000070900720c */ /* 0x000fda0003f06070 */ /*01a0*/ @!P0 LEA R4, R7, R0, 0x3 ; /* 0x0000000007048211 */ /* 0x001fe200078e18ff */ /*01b0*/ @!P0 LDS.64 R2, [R9.X8] ; /* 0x0000000009028984 */ /* 0x000fea0000008a00 */ /*01c0*/ @!P0 LDS.64 R4, [R4] ; /* 0x0000000004048984 */ /* 0x000e240000000a00 */ /*01d0*/ @!P0 DADD R2, R2, R4 ; /* 0x0000000002028229 */ /* 0x001e0e0000000004 */ /*01e0*/ @!P0 STS.64 [R9.X8], R2 ; /* 0x0000000209008388 */ /* 0x0011e80000008a00 */ /*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0200*/ ISETP.GT.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */ /* 0x000fe20003f04070 */ /*0210*/ IMAD.MOV.U32 R6, RZ, RZ, R7 ; /* 0x000000ffff067224 */ /* 0x000fd800078e0007 */ /*0220*/ @P0 BRA 0x170 ; /* 0xffffff4000000947 */ /* 0x001fea000383ffff */ /*0230*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0240*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0250*/ MOV R5, 0x8 ; /* 0x0000000800057802 */ /* 0x001fca0000000f00 */ /*0260*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x168] ; /* 0x00005a0008047625 */ /* 0x000fe200078e0005 */ /*0270*/ LDS.64 R2, [RZ] ; /* 0x00000000ff027984 */ /* 0x000e280000000a00 */ /*0280*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */ /* 0x001fe2000c101b04 */ /*0290*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9block_sumPKdPdm .globl _Z9block_sumPKdPdm .p2align 8 .type _Z9block_sumPKdPdm,@function _Z9block_sumPKdPdm: s_load_b64 s[4:5], s[0:1], 0x10 s_mov_b32 s2, s15 s_mov_b32 s3, exec_lo v_lshl_add_u32 v3, s2, 9, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v1, 0x100, v3 v_ashrrev_i32_e32 v2, 31, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[4:5], v[1:2] s_cbranch_execz .LBB0_6 s_load_b64 s[4:5], s[0:1], 0x0 v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[1:2], 3, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[3:4] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo s_clause 0x1 global_load_b64 v[3:4], v[3:4], off global_load_b64 v[1:2], v[1:2], off s_waitcnt vmcnt(0) v_add_f64 v[2:3], v[3:4], v[1:2] v_lshlrev_b32_e32 v1, 3, v0 ds_store_b64 v1, v[2:3] s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_load_b32 s4, s[0:1], 0x24 s_waitcnt lgkmcnt(0) v_cmp_lt_u16_e64 s5, s4, 2 s_delay_alu instid0(VALU_DEP_1) s_and_b32 vcc_lo, exec_lo, s5 s_cbranch_vccnz .LBB0_6 s_and_b32 s4, 0xffff, s4 s_branch .LBB0_4 .p2align 6 .LBB0_3: s_or_b32 exec_lo, exec_lo, s6 s_cmp_gt_u32 s4, 3 s_mov_b32 s4, s5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_6 .LBB0_4: s_lshr_b32 s5, s4, 1 s_mov_b32 s6, exec_lo v_cmpx_gt_u32_e64 s5, v0 s_cbranch_execz .LBB0_3 v_add_lshl_u32 v2, s5, v0, 3 ds_load_b64 v[2:3], v2 ds_load_b64 v[4:5], v1 s_waitcnt lgkmcnt(0) v_add_f64 v[2:3], v[2:3], v[4:5] ds_store_b64 v1, v[2:3] s_branch .LBB0_3 .LBB0_6: s_or_b32 exec_lo, exec_lo, s3 v_mov_b32_e32 v2, 0 s_load_b64 s[0:1], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b64 v[0:1], v2 s_mov_b32 s3, 
0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 3 s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 s_waitcnt lgkmcnt(0) global_store_b64 v2, v[0:1], s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9block_sumPKdPdm .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9block_sumPKdPdm, .Lfunc_end0-_Z9block_sumPKdPdm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9block_sumPKdPdm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9block_sumPKdPdm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00021dcb_00000000-6_reduction2.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4075: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4075: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z13random_doublev .type _Z13random_doublev, @function _Z13random_doublev: .LFB4071: .cfi_startproc endbr64 movsd .LC0(%rip), %xmm0 ret .cfi_endproc .LFE4071: .size _Z13random_doublev, .-_Z13random_doublev .globl _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .type _Z32__device_stub__Z9block_sumPKdPdmPKdPdm, @function _Z32__device_stub__Z9block_sumPKdPdmPKdPdm: .LFB4097: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L8 .L4: movq 120(%rsp), %rax subq %fs:40, %rax jne .L9 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L8: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9block_sumPKdPdm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L4 .L9: call __stack_chk_fail@PLT .cfi_endproc .LFE4097: .size _Z32__device_stub__Z9block_sumPKdPdmPKdPdm, .-_Z32__device_stub__Z9block_sumPKdPdmPKdPdm .globl 
_Z9block_sumPKdPdm .type _Z9block_sumPKdPdm, @function _Z9block_sumPKdPdm: .LFB4098: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4098: .size _Z9block_sumPKdPdm, .-_Z9block_sumPKdPdm .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z9block_sumPKdPdm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4100: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z9block_sumPKdPdm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4100: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .text._ZNSt6vectorIdSaIdEED2Ev,"axG",@progbits,_ZNSt6vectorIdSaIdEED5Ev,comdat .align 2 .weak _ZNSt6vectorIdSaIdEED2Ev .type _ZNSt6vectorIdSaIdEED2Ev, @function _ZNSt6vectorIdSaIdEED2Ev: .LFB4410: .cfi_startproc endbr64 movq (%rdi), %rax testq %rax, %rax je .L17 subq $8, %rsp .cfi_def_cfa_offset 16 movq 16(%rdi), %rsi subq %rax, %rsi movq %rax, %rdi call _ZdlPvm@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .L17: ret .cfi_endproc .LFE4410: .size _ZNSt6vectorIdSaIdEED2Ev, .-_ZNSt6vectorIdSaIdEED2Ev .weak _ZNSt6vectorIdSaIdEED1Ev .set _ZNSt6vectorIdSaIdEED1Ev,_ZNSt6vectorIdSaIdEED2Ev .section .rodata.str1.1 .LC4: .string "Host sum: " .LC5: .string "Device sum: " .LC6: 
.string "ElapsedTime:" .text .globl main .type main, @function main: .LFB4072: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4072 endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 addq $-128, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl $2097152, %edi .LEHB0: call _Znwm@PLT .LEHE0: movq %rax, %rbx movq %rax, 96(%rsp) leaq 2097152(%rax), %rdx movq %rdx, 112(%rsp) movq $0x000000000, (%rax) leaq 8(%rax), %rax .L21: movq $0x000000000, (%rax) addq $8, %rax cmpq %rdx, %rax jne .L21 movq %rdx, 104(%rsp) movq %rbx, %rax leaq 2097152(%rbx), %rcx movq %rbx, %rdx movsd .LC0(%rip), %xmm0 .L22: movsd %xmm0, (%rdx) addq $8, %rdx cmpq %rcx, %rdx jne .L22 pxor %xmm0, %xmm0 .L23: cvtss2sd %xmm0, %xmm0 addsd (%rax), %xmm0 cvtsd2ss %xmm0, %xmm0 addq $8, %rax cmpq %rcx, %rax jne .L23 pxor %xmm1, %xmm1 cvtss2sd %xmm0, %xmm1 movsd %xmm1, 8(%rsp) leaq .LC4(%rip), %rsi leaq _ZSt4cerr(%rip), %rdi .LEHB1: call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movsd 8(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq $0, 24(%rsp) leaq 24(%rsp), %rdi movl $2097152, %esi call cudaMalloc@PLT movl $1, %ecx movl $2097152, %edx movq %rbx, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movq $0, 32(%rsp) leaq 32(%rsp), %rdi movl $4096, %esi call cudaMalloc@PLT movq $0, 40(%rsp) movq $0x000000000, 48(%rsp) leaq 40(%rsp), %rdi movl $8, %esi call cudaMalloc@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movl $256, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $512, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call 
__cudaPushCallConfiguration@PLT testl %eax, %eax jne .L24 movl $262144, %edx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .L24: movl $256, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L25 movl $512, %edx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z32__device_stub__Z9block_sumPKdPdmPKdPdm .L25: movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT leaq 48(%rsp), %rdi movl $2, %ecx movl $8, %edx movq 40(%rsp), %rsi call cudaMemcpy@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 84(%rsp) leaq 84(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movsd 48(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC6(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 84(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT .LEHE1: leaq 96(%rsp), %rdi call _ZNSt6vectorIdSaIdEED1Ev movq 120(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax subq $-128, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state endbr64 movq %rax, %rbx leaq 96(%rsp), %rdi call _ZNSt6vectorIdSaIdEED1Ev movq 120(%rsp), %rax subq %fs:40, %rax je .L27 call __stack_chk_fail@PLT .L27: movq %rbx, %rdi .LEHB2: call _Unwind_Resume@PLT .LEHE2: .L35: call 
__stack_chk_fail@PLT .cfi_endproc .LFE4072: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4072: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4072-.LLSDACSB4072 .LLSDACSB4072: .uleb128 .LEHB0-.LFB4072 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4072 .uleb128 .LEHE1-.LEHB1 .uleb128 .L29-.LFB4072 .uleb128 0 .uleb128 .LEHB2-.LFB4072 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .LLSDACSE4072: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1072693248 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "reduction2.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z13random_doublev .LCPI0_0: .quad 0x3ff0000000000000 # double 1 .text .globl _Z13random_doublev .p2align 4, 0x90 .type _Z13random_doublev,@function _Z13random_doublev: # @_Z13random_doublev .cfi_startproc # %bb.0: movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero retq .Lfunc_end0: .size _Z13random_doublev, .Lfunc_end0-_Z13random_doublev .cfi_endproc # -- End function .globl _Z24__device_stub__block_sumPKdPdm # -- Begin function _Z24__device_stub__block_sumPKdPdm .p2align 4, 0x90 .type _Z24__device_stub__block_sumPKdPdm,@function _Z24__device_stub__block_sumPKdPdm: # @_Z24__device_stub__block_sumPKdPdm .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z24__device_stub__block_sumPKdPdm, .Lfunc_end1-_Z24__device_stub__block_sumPKdPdm .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: # %_ZNSt6vectorIdSaIdEEC2EmRKS0_.exit pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $160, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -32 .cfi_offset 
%r14, -24 .cfi_offset %r15, -16 .cfi_escape 0x2e, 0x00 xorl %r14d, %r14d xorl %edi, %edi callq time .cfi_escape 0x2e, 0x00 movl %eax, %edi callq srand .cfi_escape 0x2e, 0x00 movl $2097152, %edi # imm = 0x200000 callq _Znwm movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $2097152, %edx # imm = 0x200000 movq %rax, %rdi xorl %esi, %esi callq memset@PLT movabsq $4607182418800017408, %rax # imm = 0x3FF0000000000000 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movq %rax, (%rbx,%r14,8) incq %r14 cmpq $262144, %r14 # imm = 0x40000 jne .LBB2_1 # %bb.2: # %.lr.ph.i.preheader xorps %xmm0, %xmm0 xorl %eax, %eax .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 cvtss2sd %xmm0, %xmm0 addsd (%rbx,%rax), %xmm0 cvtsd2ss %xmm0, %xmm0 addq $8, %rax cmpq $2097152, %rax # imm = 0x200000 jne .LBB2_3 # %bb.4: # %_ZSt10accumulateIN9__gnu_cxx17__normal_iteratorIPdSt6vectorIdSaIdEEEEfET0_T_S8_S7_.exit .Ltmp0: movss %xmm0, 108(%rsp) # 4-byte Spill .cfi_escape 0x2e, 0x00 movl $_ZSt4cerr, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp1: # %bb.5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit movss 108(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 .Ltmp2: .cfi_escape 0x2e, 0x00 movl $_ZSt4cerr, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp3: # %bb.6: # %_ZNSolsEd.exit movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_7 # %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r15) je .LBB2_11 # %bb.10: movzbl 67(%r15), %eax jmp .LBB2_13 .LBB2_11: .Ltmp4: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp5: # %bb.12: # %.noexc54 movq (%r15), %rax .Ltmp6: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp7: .LBB2_13: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i .Ltmp8: .cfi_escape 0x2e, 0x00 
movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp9: # %bb.14: # %.noexc56 .Ltmp10: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp11: # %bb.15: # %_ZNSolsEPFRSoS_E.exit movq $0, 16(%rsp) .Ltmp12: .cfi_escape 0x2e, 0x00 leaq 16(%rsp), %rdi movl $2097152, %esi # imm = 0x200000 callq hipMalloc .Ltmp13: # %bb.16: movq 16(%rsp), %rdi .Ltmp14: .cfi_escape 0x2e, 0x00 movl $2097152, %edx # imm = 0x200000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy .Ltmp15: # %bb.17: movq $0, 8(%rsp) .Ltmp17: .cfi_escape 0x2e, 0x00 leaq 8(%rsp), %rdi movl $4096, %esi # imm = 0x1000 callq hipMalloc .Ltmp18: # %bb.18: movq $0, (%rsp) movq $0, 152(%rsp) .Ltmp20: .cfi_escape 0x2e, 0x00 movq %rsp, %rdi movl $8, %esi callq hipMalloc .Ltmp21: # %bb.19: .Ltmp23: .cfi_escape 0x2e, 0x00 leaq 144(%rsp), %rdi callq hipEventCreate .Ltmp24: # %bb.20: .Ltmp25: .cfi_escape 0x2e, 0x00 leaq 24(%rsp), %rdi callq hipEventCreate .Ltmp26: # %bb.21: movq 144(%rsp), %rdi .Ltmp27: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp28: # %bb.22: .Ltmp29: .cfi_escape 0x2e, 0x00 movabsq $4294967808, %rdi # imm = 0x100000200 movabsq $4294967552, %rdx # imm = 0x100000100 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp30: # %bb.23: testl %eax, %eax jne .LBB2_26 # %bb.24: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) movq $262144, 80(%rsp) # imm = 0x40000 leaq 96(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) .Ltmp31: .cfi_escape 0x2e, 0x00 leaq 64(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp32: # %bb.25: # %.noexc movq 64(%rsp), %rsi movl 72(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp33: .cfi_escape 0x2e, 0x10 leaq 112(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp34: .LBB2_26: .Ltmp35: .cfi_escape 0x2e, 0x00 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $4294967552, %rdx # imm = 0x100000100 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp36: # %bb.27: testl %eax, %eax jne .LBB2_30 # %bb.28: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) movq $512, 80(%rsp) # imm = 0x200 leaq 96(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) .Ltmp37: .cfi_escape 0x2e, 0x00 leaq 64(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp38: # %bb.29: # %.noexc36 movq 64(%rsp), %rsi movl 72(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp39: .cfi_escape 0x2e, 0x10 leaq 112(%rsp), %r9 movl $_Z9block_sumPKdPdm, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp40: .LBB2_30: movq 24(%rsp), %rdi .Ltmp41: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp42: # %bb.31: movq (%rsp), %rsi .Ltmp43: .cfi_escape 0x2e, 0x00 leaq 152(%rsp), %rdi movl $8, %edx movl $2, %ecx callq hipMemcpy .Ltmp44: # %bb.32: movq 24(%rsp), %rdi .Ltmp45: .cfi_escape 0x2e, 0x00 callq hipEventSynchronize .Ltmp46: # %bb.33: movl $0, 112(%rsp) movq 144(%rsp), %rsi movq 24(%rsp), %rdx .Ltmp48: .cfi_escape 0x2e, 0x00 leaq 112(%rsp), %rdi callq hipEventElapsedTime .Ltmp49: # %bb.34: .Ltmp50: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp51: # %bb.35: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit40 movsd 152(%rsp), %xmm0 # xmm0 = mem[0],zero .Ltmp52: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp53: # %bb.36: # 
%_ZNSolsEd.exit42 movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_51 # %bb.37: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i59 cmpb $0, 56(%r15) je .LBB2_44 # %bb.38: movzbl 67(%r15), %eax jmp .LBB2_46 .LBB2_44: .Ltmp54: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp55: # %bb.45: # %.noexc64 movq (%r15), %rax .Ltmp56: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp57: .LBB2_46: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i61 .Ltmp58: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp59: # %bb.47: # %.noexc66 .Ltmp60: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp61: # %bb.48: # %_ZNSolsEPFRSoS_E.exit44 .Ltmp62: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp63: # %bb.49: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit46 movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 .Ltmp64: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp65: # %bb.50: # %_ZNSolsEf.exit movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_51 # %bb.53: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i70 cmpb $0, 56(%r15) je .LBB2_55 # %bb.54: movzbl 67(%r15), %eax jmp .LBB2_57 .LBB2_55: .Ltmp66: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp67: # %bb.56: # %.noexc75 movq (%r15), %rax .Ltmp68: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp69: .LBB2_57: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i72 .Ltmp70: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp71: # %bb.58: # %.noexc77 .Ltmp72: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp73: # 
%bb.59: # %_ZNSolsEPFRSoS_E.exit49 movq 16(%rsp), %rdi .Ltmp74: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp75: # %bb.60: movq 8(%rsp), %rdi .Ltmp76: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp77: # %bb.61: movq (%rsp), %rdi .Ltmp78: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp79: # %bb.62: # %_ZNSt6vectorIdSaIdEED2Ev.exit .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdlPv xorl %eax, %eax addq $160, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB2_51: # %.invoke .cfi_def_cfa_offset 192 .Ltmp80: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp81: # %bb.52: # %.cont .LBB2_7: .Ltmp83: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp84: # %bb.8: # %.noexc53 .LBB2_42: .Ltmp22: jmp .LBB2_64 .LBB2_41: .Ltmp19: jmp .LBB2_64 .LBB2_40: .Ltmp16: jmp .LBB2_64 .LBB2_39: .Ltmp85: jmp .LBB2_64 .LBB2_43: .Ltmp47: jmp .LBB2_64 .LBB2_63: .Ltmp82: .LBB2_64: movq %rax, %r14 .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdlPv .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp11-.Ltmp0 # Call between .Ltmp0 and .Ltmp11 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp15-.Ltmp12 # Call between .Ltmp12 and .Ltmp15 .uleb128 .Ltmp16-.Lfunc_begin0 # jumps to .Ltmp16 .byte 0 # On action: cleanup .uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 4 
<< .uleb128 .Ltmp18-.Ltmp17 # Call between .Ltmp17 and .Ltmp18 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp20-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp21-.Ltmp20 # Call between .Ltmp20 and .Ltmp21 .uleb128 .Ltmp22-.Lfunc_begin0 # jumps to .Ltmp22 .byte 0 # On action: cleanup .uleb128 .Ltmp23-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp46-.Ltmp23 # Call between .Ltmp23 and .Ltmp46 .uleb128 .Ltmp47-.Lfunc_begin0 # jumps to .Ltmp47 .byte 0 # On action: cleanup .uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Ltmp81-.Ltmp48 # Call between .Ltmp48 and .Ltmp81 .uleb128 .Ltmp82-.Lfunc_begin0 # jumps to .Ltmp82 .byte 0 # On action: cleanup .uleb128 .Ltmp83-.Lfunc_begin0 # >> Call Site 8 << .uleb128 .Ltmp84-.Ltmp83 # Call between .Ltmp83 and .Ltmp84 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp84-.Lfunc_begin0 # >> Call Site 9 << .uleb128 .Lfunc_end2-.Ltmp84 # Call between .Ltmp84 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9block_sumPKdPdm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z9block_sumPKdPdm,@object # @_Z9block_sumPKdPdm .section .rodata,"a",@progbits .globl _Z9block_sumPKdPdm .p2align 3, 0x0 _Z9block_sumPKdPdm: .quad _Z24__device_stub__block_sumPKdPdm .size _Z9block_sumPKdPdm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Host sum: " .size .L.str, 11 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Device sum: " .size .L.str.1, 13 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "ElapsedTime:" .size .L.str.2, 13 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9block_sumPKdPdm" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__block_sumPKdPdm .addrsig_sym __gxx_personality_v0 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym 
_Z9block_sumPKdPdm .addrsig_sym _ZSt4cerr .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
__device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
code for sm_80 Function : vecAdd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R18, SR_CTAID.X ; /* 0x0000000000127919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */ /* 0x000fc60003f05070 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0040*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x184], PT, P0 ; /* 0x00006100ff007a0c */ /* 0x000fda0003f05300 */ /*0050*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff027624 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */ /* 0x000fe20000000a00 */ /*0080*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff007624 */ /* 0x000fe400078e00ff */ /*0090*/ IMAD R18, R18, c[0x0][0x0], R3 ; /* 0x0000000012127a24 */ /* 0x003fe200078e0203 */ /*00a0*/ IADD3 R4, P1, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x040fe40007f3e0ff */ /*00b0*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe400078ec0ff */ /*00c0*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*00d0*/ IADD3.X R0, R0, -0x1, RZ, P1, !PT ; /* 0xffffffff00007810 */ /* 0x000fc40000ffe4ff */ /*00e0*/ LEA R22, P1, R18, c[0x0][0x170], 0x2 ; /* 0x00005c0012167a11 */ /* 0x000fe400078210ff */ /*00f0*/ ISETP.GE.U32.AND.EX P0, PT, R0, RZ, PT, P0 ; /* 0x000000ff0000720c */ /* 0x000fe40003f06100 */ /*0100*/ LEA.HI.X R23, R18, c[0x0][0x174], RZ, 0x2, P1 ; /* 0x00005d0012177a11 */ /* 0x000fd600008f14ff */ /*0110*/ @!P0 BRA 0x480 ; /* 0x0000036000008947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R17, P0, R2, -c[0x0][0x180], RZ ; /* 0x8000600002117a10 */ /* 0x000fca0007f1e0ff */ /*0130*/ IMAD.X R16, RZ, RZ, ~c[0x0][0x184], P0 ; /* 
0x80006100ff107624 */ /* 0x000fe400000e06ff */ /*0140*/ ISETP.GE.U32.AND P0, PT, R18, c[0x0][0x178], PT ; /* 0x00005e0012007a0c */ /* 0x000fe20003f06070 */ /*0150*/ BSSY B6, 0x460 ; /* 0x0000030000067945 */ /* 0x000fe20003800000 */ /*0160*/ IADD3 R17, P1, R17, 0x4, RZ ; /* 0x0000000411117810 */ /* 0x000fe40007f3e0ff */ /*0170*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fc60003f06100 */ /*0180*/ IMAD.X R16, RZ, RZ, R16, P1 ; /* 0x000000ffff107224 */ /* 0x000fe200008e0610 */ /*0190*/ ISETP.NE.U32.AND P1, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fc80003f25070 */ /*01a0*/ ISETP.NE.AND.EX P1, PT, R16, RZ, PT, P1 ; /* 0x000000ff1000720c */ /* 0x000fc80003f25310 */ /*01b0*/ P2R R19, PR, RZ, 0x2 ; /* 0x00000002ff137803 */ /* 0x000fe20000000000 */ /*01c0*/ @P0 BRA 0x450 ; /* 0x0000028000000947 */ /* 0x001fea0003800000 */ /*01d0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x000fe200078e00ff */ /*01e0*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*0200*/ MOV R20, 0x260 ; /* 0x0000026000147802 */ /* 0x000fe20000000f00 */ /*0210*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0220*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*0230*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*0240*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fc800078e00ff */ /*0250*/ CALL.REL.NOINC 0x640 ; /* 0x000003e000007944 */ /* 0x000fea0003c00000 */ /*0260*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0270*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*0280*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*0290*/ IMAD.MOV.U32 R6, RZ, RZ, 
c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*02a0*/ MOV R20, 0x300 ; /* 0x0000030000147802 */ /* 0x000fe20000000f00 */ /*02b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fc400078e00ff */ /*02c0*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*02d0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*02e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fcc00078e00ff */ /*02f0*/ CALL.REL.NOINC 0x640 ; /* 0x0000034000007944 */ /* 0x000fea0003c00000 */ /*0300*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*0310*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe200078e00ff */ /*0320*/ MOV R20, 0x3a0 ; /* 0x000003a000147802 */ /* 0x000fe20000000f00 */ /*0330*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe400078e00ff */ /*0340*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0350*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fc400078e0012 */ /*0360*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*0370*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fe200078e00ff */ /*0380*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x001fcc0000000f00 */ /*0390*/ CALL.REL.NOINC 0x640 ; /* 0x000002a000007944 */ /* 0x000fea0003c00000 */ /*03a0*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*03b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe200078e00ff */ /*03c0*/ MOV R9, c[0x0][0x178] ; /* 0x00005e0000097a02 */ /* 0x000fe20000000f00 */ /*03d0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*03e0*/ MOV R20, 0x440 ; /* 0x0000044000147802 */ /* 0x000fe20000000f00 */ /*03f0*/ IMAD.MOV.U32 R7, RZ, RZ, 
c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0400*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fc400078e0012 */ /*0410*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fe400078e00ff */ /*0420*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fcc00078e00ff */ /*0430*/ CALL.REL.NOINC 0x640 ; /* 0x0000020000007944 */ /* 0x000fea0003c00000 */ /*0440*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e4000c101924 */ /*0450*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*0460*/ ISETP.NE.AND P0, PT, R19, RZ, PT ; /* 0x000000ff1300720c */ /* 0x000fda0003f05270 */ /*0470*/ @P0 BRA 0x140 ; /* 0xfffffcc000000947 */ /* 0x000fea000383ffff */ /*0480*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fc80003f05070 */ /*0490*/ ISETP.NE.AND.EX P0, PT, RZ, RZ, PT, P0 ; /* 0x000000ffff00720c */ /* 0x000fda0003f05300 */ /*04a0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*04b0*/ IADD3 R16, P0, RZ, -R2, RZ ; /* 0x80000002ff107210 */ /* 0x000fca0007f1e0ff */ /*04c0*/ IMAD.X R2, RZ, RZ, -0x1, P0 ; /* 0xffffffffff027424 */ /* 0x000fe400000e06ff */ /*04d0*/ ISETP.GE.U32.AND P0, PT, R18, c[0x0][0x178], PT ; /* 0x00005e0012007a0c */ /* 0x000fe20003f06070 */ /*04e0*/ BSSY B6, 0x610 ; /* 0x0000012000067945 */ /* 0x000fe20003800000 */ /*04f0*/ IADD3 R16, P1, R16, 0x1, RZ ; /* 0x0000000110107810 */ /* 0x000fe40007f3e0ff */ /*0500*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fc60003f06100 */ /*0510*/ IMAD.X R2, RZ, RZ, R2, P1 ; /* 0x000000ffff027224 */ /* 0x000fe200008e0602 */ /*0520*/ ISETP.NE.U32.AND P1, PT, R16, RZ, PT ; /* 0x000000ff1000720c */ /* 0x000fc80003f25070 */ /*0530*/ ISETP.NE.AND.EX P1, PT, R2, RZ, PT, P1 ; /* 0x000000ff0200720c */ /* 0x000fc80003f25310 */ /*0540*/ P2R R17, PR, RZ, 0x2 ; /* 0x00000002ff117803 */ /* 0x000fe20000000000 */ /*0550*/ @P0 BRA 0x600 ; /* 0x000000a000000947 */ 
/* 0x001fea0003800000 */ /*0560*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0570*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fe200078e00ff */ /*0580*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*0590*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*05a0*/ MOV R20, 0x5f0 ; /* 0x000005f000147802 */ /* 0x000fe20000000f00 */ /*05b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*05c0*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*05d0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fc800078e00ff */ /*05e0*/ CALL.REL.NOINC 0x640 ; /* 0x0000005000007944 */ /* 0x000fea0003c00000 */ /*05f0*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e4000c101924 */ /*0600*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*0610*/ ISETP.NE.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fda0003f05270 */ /*0620*/ @P0 BRA 0x4d0 ; /* 0xfffffea000000947 */ /* 0x000fea000383ffff */ /*0630*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0640*/ IADD3 R1, R1, -0x18, RZ ; /* 0xffffffe801017810 */ /* 0x000fca0007ffe0ff */ /*0650*/ STL [R1+0x10], R21 ; /* 0x0000101501007387 */ /* 0x000fe80000100800 */ /*0660*/ STL [R1+0xc], R20 ; /* 0x00000c1401007387 */ /* 0x000fe80000100800 */ /*0670*/ STL [R1+0x8], R17 ; /* 0x0000081101007387 */ /* 0x000fe80000100800 */ /*0680*/ STL [R1+0x4], R16 ; /* 0x0000041001007387 */ /* 0x0001e80000100800 */ /*0690*/ STL [R1], R2 ; /* 0x0000000201007387 */ /* 0x0003e20000100800 */ /*06a0*/ BMOV.32.CLEAR R16, B6 ; /* 0x0000000006107355 */ /* 0x001e240000100000 */ /*06b0*/ ISETP.GE.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */ /* 0x000fe20003f06270 */ /*06c0*/ BSSY B6, 0x7e0 ; /* 0x0000011000067945 */ /* 0x000fd80003800000 */ /*06d0*/ @!P0 BRA 0x730 ; /* 
0x0000005000008947 */ /* 0x000fea0003800000 */ /*06e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*06f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0700*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea4000c1e1900 */ /*0710*/ IMAD.IADD R4, R4, 0x1, R7 ; /* 0x0000000104047824 */ /* 0x004fe200078e0207 */ /*0720*/ BRA 0x7d0 ; /* 0x000000a000007947 */ /* 0x000fea0003800000 */ /*0730*/ IMAD.WIDE R2, R8, 0x4, R6 ; /* 0x0000000408027825 */ /* 0x002fe200078e0206 */ /*0740*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0750*/ IMAD.WIDE R10, R8, 0x4, R4 ; /* 0x00000004080a7825 */ /* 0x000fe400078e0204 */ /*0760*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000368000c1e1900 */ /*0770*/ LDG.E R17, [R10.64] ; /* 0x000000040a117981 */ /* 0x000362000c1e1900 */ /*0780*/ IMAD.MOV.U32 R8, RZ, RZ, R9 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0009 */ /*0790*/ MOV R20, 0x7c0 ; /* 0x000007c000147802 */ /* 0x000fe20000000f00 */ /*07a0*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fc800078e00ff */ /*07b0*/ CALL.REL.NOINC 0x640 ; /* 0xfffffe8000007944 */ /* 0x023fea0003c3ffff */ /*07c0*/ IADD3 R4, R4, R2, R17 ; /* 0x0000000204047210 */ /* 0x000fe40007ffe011 */ /*07d0*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*07e0*/ LDL R20, [R1+0xc] ; /* 0x00000c0001147983 */ /* 0x0004e20000100800 */ /*07f0*/ BMOV.32 B6, R16 ; /* 0x0000001006007356 */ /* 0x0011e60000000000 */ /*0800*/ LDL R21, [R1+0x10] ; /* 0x0000100001157983 */ /* 0x0004e80000100800 */ /*0810*/ LDL R2, [R1] ; /* 0x0000000001027983 */ /* 0x0024e80000100800 */ /*0820*/ LDL R16, [R1+0x4] ; /* 0x0000040001107983 */ /* 0x0014e80000100800 */ /*0830*/ LDL R17, [R1+0x8] ; /* 0x0000080001117983 */ /* 0x0004e40000100800 */ /*0840*/ IADD3 R1, R1, 0x18, RZ ; /* 0x0000001801017810 */ /* 0x004fe20007ffe0ff */ /*0850*/ RET.REL.NODEC R20 0x0 ; /* 0xfffff7a014007950 */ /* 0x008fec0003c3ffff */ 
/*0860*/ BRA 0x860; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0870*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0880*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
__device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
.file "tmpxft_000ddb70_00000000-6_vecAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z3addPiS_ii .type _Z3addPiS_ii, @function _Z3addPiS_ii: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z3addPiS_ii, .-_Z3addPiS_ii .globl _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm .type _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm, @function _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm: .LFB2052: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 152(%rsp), %rax subq %fs:40, %rax jne .L10 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq vecAdd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp 
.L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm, .-_Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm .globl vecAdd .type vecAdd, @function vecAdd: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size vecAdd, .-vecAdd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "vecAdd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq vecAdd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
__device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
#include <hip/hip_runtime.h> __device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> __device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .p2align 2 .type _Z3addPiS_ii,@function _Z3addPiS_ii: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v5, 0 s_mov_b32 s0, 0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_2 .p2align 6 .LBB0_1: s_or_b32 exec_lo, exec_lo, s2 v_mov_b32_e32 v4, v6 s_and_b32 s2, exec_lo, s1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 s0, s2, s0 s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execz .LBB0_4 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) v_mov_b32_e32 v7, v5 s_or_b32 s1, s1, exec_lo s_mov_b32 s2, exec_lo v_cmpx_lt_i32_e64 v4, v6 s_cbranch_execz .LBB0_1 v_ashrrev_i32_e32 v5, 31, v4 s_and_not1_b32 s1, s1, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v8, vcc_lo, v0, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v9, vcc_lo, v1, v5, vcc_lo v_add_co_u32 v4, vcc_lo, v2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, v3, v5, vcc_lo flat_load_b32 v8, v[8:9] flat_load_b32 v4, v[4:5] s_waitcnt vmcnt(0) lgkmcnt(0) v_add3_u32 v5, v4, v8, v7 s_branch .LBB0_1 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_or_b32 exec_lo, exec_lo, s0 flat_load_b32 v0, v[0:1] flat_load_b32 v1, v[2:3] s_waitcnt vmcnt(0) lgkmcnt(0) v_add3_u32 v0, v1, v0, v7 s_setpc_b64 s[30:31] .Lfunc_end0: .size _Z3addPiS_ii, .Lfunc_end0-_Z3addPiS_ii .section .AMDGPU.csdata,"",@progbits .text .protected vecAdd .globl vecAdd .p2align 8 .type vecAdd,@function vecAdd: s_load_b64 s[12:13], s[0:1], 0x20 s_mov_b32 s32, 0 s_waitcnt lgkmcnt(0) s_cmp_eq_u64 s[12:13], 0 s_cbranch_scc1 .LBB1_5 s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b256 s[4:11], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s0, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[10:11], null, s15, s0, v[0:1] v_mov_b32_e32 v11, 0 v_lshlrev_b64 v[0:1], 2, v[10:11] v_cmp_gt_u64_e64 s3, s[10:11], v[10:11] s_delay_alu 
instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v11, vcc_lo, s8, v0 v_add_co_ci_u32_e32 v12, vcc_lo, s9, v1, vcc_lo s_branch .LBB1_3 .p2align 6 .LBB1_2: s_or_b32 exec_lo, exec_lo, s8 s_add_u32 s12, s12, -1 s_addc_u32 s13, s13, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u64 s[12:13], 0 s_cbranch_scc1 .LBB1_5 .LBB1_3: s_delay_alu instid0(VALU_DEP_3) s_and_saveexec_b32 s8, s3 s_cbranch_execz .LBB1_2 v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 v_dual_mov_b32 v4, v10 :: v_dual_mov_b32 v5, s10 s_getpc_b64 s[0:1] s_add_u32 s0, s0, _Z3addPiS_ii@rel32@lo+4 s_addc_u32 s1, s1, _Z3addPiS_ii@rel32@hi+12 s_delay_alu instid0(SALU_CYCLE_1) s_swappc_b64 s[30:31], s[0:1] global_store_b32 v[11:12], v0, off s_branch .LBB1_2 .LBB1_5: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel vecAdd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 1 .amdhsa_enable_private_segment 1 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 33 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 
.amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size vecAdd, .Lfunc_end1-vecAdd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: vecAdd .private_segment_fixed_size: 0 .sgpr_count: 35 .sgpr_spill_count: 0 .symbol: vecAdd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: true .vgpr_count: 13 
.vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> __device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N) { if (i < N) { return l[i] + r[i] + add(l, r, N, N); } else { return l[0] + r[0]; } } extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; for (size_t i = 0; i < iter; ++i) { if (idx < N) { p[idx] = add(l, r, idx, N); } } }
.text .file "vecAdd.hip" .globl __device_stub__vecAdd # -- Begin function __device_stub__vecAdd .p2align 4, 0x90 .type __device_stub__vecAdd,@function __device_stub__vecAdd: # @__device_stub__vecAdd .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $vecAdd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size __device_stub__vecAdd, .Lfunc_end0-__device_stub__vecAdd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $vecAdd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type vecAdd,@object # @vecAdd .section .rodata,"a",@progbits .globl vecAdd .p2align 3, 0x0 vecAdd: .quad __device_stub__vecAdd .size vecAdd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "vecAdd" .size .L__unnamed_1, 7 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__vecAdd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym vecAdd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : vecAdd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R18, SR_CTAID.X ; /* 0x0000000000127919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */ /* 0x000fc60003f05070 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0040*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x184], PT, P0 ; /* 0x00006100ff007a0c */ /* 0x000fda0003f05300 */ /*0050*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff027624 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */ /* 0x000fe20000000a00 */ /*0080*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff007624 */ /* 0x000fe400078e00ff */ /*0090*/ IMAD R18, R18, c[0x0][0x0], R3 ; /* 0x0000000012127a24 */ /* 0x003fe200078e0203 */ /*00a0*/ IADD3 R4, P1, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x040fe40007f3e0ff */ /*00b0*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe400078ec0ff */ /*00c0*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*00d0*/ IADD3.X R0, R0, -0x1, RZ, P1, !PT ; /* 0xffffffff00007810 */ /* 0x000fc40000ffe4ff */ /*00e0*/ LEA R22, P1, R18, c[0x0][0x170], 0x2 ; /* 0x00005c0012167a11 */ /* 0x000fe400078210ff */ /*00f0*/ ISETP.GE.U32.AND.EX P0, PT, R0, RZ, PT, P0 ; /* 0x000000ff0000720c */ /* 0x000fe40003f06100 */ /*0100*/ LEA.HI.X R23, R18, c[0x0][0x174], RZ, 0x2, P1 ; /* 0x00005d0012177a11 */ /* 0x000fd600008f14ff */ /*0110*/ @!P0 BRA 0x480 ; /* 0x0000036000008947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R17, P0, R2, -c[0x0][0x180], RZ ; /* 0x8000600002117a10 */ /* 0x000fca0007f1e0ff */ /*0130*/ IMAD.X R16, RZ, RZ, ~c[0x0][0x184], P0 ; /* 
0x80006100ff107624 */ /* 0x000fe400000e06ff */ /*0140*/ ISETP.GE.U32.AND P0, PT, R18, c[0x0][0x178], PT ; /* 0x00005e0012007a0c */ /* 0x000fe20003f06070 */ /*0150*/ BSSY B6, 0x460 ; /* 0x0000030000067945 */ /* 0x000fe20003800000 */ /*0160*/ IADD3 R17, P1, R17, 0x4, RZ ; /* 0x0000000411117810 */ /* 0x000fe40007f3e0ff */ /*0170*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fc60003f06100 */ /*0180*/ IMAD.X R16, RZ, RZ, R16, P1 ; /* 0x000000ffff107224 */ /* 0x000fe200008e0610 */ /*0190*/ ISETP.NE.U32.AND P1, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fc80003f25070 */ /*01a0*/ ISETP.NE.AND.EX P1, PT, R16, RZ, PT, P1 ; /* 0x000000ff1000720c */ /* 0x000fc80003f25310 */ /*01b0*/ P2R R19, PR, RZ, 0x2 ; /* 0x00000002ff137803 */ /* 0x000fe20000000000 */ /*01c0*/ @P0 BRA 0x450 ; /* 0x0000028000000947 */ /* 0x001fea0003800000 */ /*01d0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x000fe200078e00ff */ /*01e0*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*0200*/ MOV R20, 0x260 ; /* 0x0000026000147802 */ /* 0x000fe20000000f00 */ /*0210*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0220*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*0230*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*0240*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fc800078e00ff */ /*0250*/ CALL.REL.NOINC 0x640 ; /* 0x000003e000007944 */ /* 0x000fea0003c00000 */ /*0260*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0270*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*0280*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*0290*/ IMAD.MOV.U32 R6, RZ, RZ, 
c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*02a0*/ MOV R20, 0x300 ; /* 0x0000030000147802 */ /* 0x000fe20000000f00 */ /*02b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fc400078e00ff */ /*02c0*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*02d0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*02e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fcc00078e00ff */ /*02f0*/ CALL.REL.NOINC 0x640 ; /* 0x0000034000007944 */ /* 0x000fea0003c00000 */ /*0300*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*0310*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe200078e00ff */ /*0320*/ MOV R20, 0x3a0 ; /* 0x000003a000147802 */ /* 0x000fe20000000f00 */ /*0330*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe400078e00ff */ /*0340*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0350*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fc400078e0012 */ /*0360*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fe400078e00ff */ /*0370*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fe200078e00ff */ /*0380*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x001fcc0000000f00 */ /*0390*/ CALL.REL.NOINC 0x640 ; /* 0x000002a000007944 */ /* 0x000fea0003c00000 */ /*03a0*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e2000c101924 */ /*03b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff057624 */ /* 0x000fe200078e00ff */ /*03c0*/ MOV R9, c[0x0][0x178] ; /* 0x00005e0000097a02 */ /* 0x000fe20000000f00 */ /*03d0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*03e0*/ MOV R20, 0x440 ; /* 0x0000044000147802 */ /* 0x000fe20000000f00 */ /*03f0*/ IMAD.MOV.U32 R7, RZ, RZ, 
c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*0400*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fc400078e0012 */ /*0410*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fe400078e00ff */ /*0420*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fcc00078e00ff */ /*0430*/ CALL.REL.NOINC 0x640 ; /* 0x0000020000007944 */ /* 0x000fea0003c00000 */ /*0440*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e4000c101924 */ /*0450*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*0460*/ ISETP.NE.AND P0, PT, R19, RZ, PT ; /* 0x000000ff1300720c */ /* 0x000fda0003f05270 */ /*0470*/ @P0 BRA 0x140 ; /* 0xfffffcc000000947 */ /* 0x000fea000383ffff */ /*0480*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fc80003f05070 */ /*0490*/ ISETP.NE.AND.EX P0, PT, RZ, RZ, PT, P0 ; /* 0x000000ffff00720c */ /* 0x000fda0003f05300 */ /*04a0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*04b0*/ IADD3 R16, P0, RZ, -R2, RZ ; /* 0x80000002ff107210 */ /* 0x000fca0007f1e0ff */ /*04c0*/ IMAD.X R2, RZ, RZ, -0x1, P0 ; /* 0xffffffffff027424 */ /* 0x000fe400000e06ff */ /*04d0*/ ISETP.GE.U32.AND P0, PT, R18, c[0x0][0x178], PT ; /* 0x00005e0012007a0c */ /* 0x000fe20003f06070 */ /*04e0*/ BSSY B6, 0x610 ; /* 0x0000012000067945 */ /* 0x000fe20003800000 */ /*04f0*/ IADD3 R16, P1, R16, 0x1, RZ ; /* 0x0000000110107810 */ /* 0x000fe40007f3e0ff */ /*0500*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fc60003f06100 */ /*0510*/ IMAD.X R2, RZ, RZ, R2, P1 ; /* 0x000000ffff027224 */ /* 0x000fe200008e0602 */ /*0520*/ ISETP.NE.U32.AND P1, PT, R16, RZ, PT ; /* 0x000000ff1000720c */ /* 0x000fc80003f25070 */ /*0530*/ ISETP.NE.AND.EX P1, PT, R2, RZ, PT, P1 ; /* 0x000000ff0200720c */ /* 0x000fc80003f25310 */ /*0540*/ P2R R17, PR, RZ, 0x2 ; /* 0x00000002ff117803 */ /* 0x000fe20000000000 */ /*0550*/ @P0 BRA 0x600 ; /* 0x000000a000000947 */ 
/* 0x001fea0003800000 */ /*0560*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0570*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff047624 */ /* 0x001fe200078e00ff */ /*0580*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*0590*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff067624 */ /* 0x000fe200078e00ff */ /*05a0*/ MOV R20, 0x5f0 ; /* 0x000005f000147802 */ /* 0x000fe20000000f00 */ /*05b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff077624 */ /* 0x000fe400078e00ff */ /*05c0*/ IMAD.MOV.U32 R8, RZ, RZ, R18 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0012 */ /*05d0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */ /* 0x000fc800078e00ff */ /*05e0*/ CALL.REL.NOINC 0x640 ; /* 0x0000005000007944 */ /* 0x000fea0003c00000 */ /*05f0*/ STG.E [R22.64], R4 ; /* 0x0000000416007986 */ /* 0x0001e4000c101924 */ /*0600*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*0610*/ ISETP.NE.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fda0003f05270 */ /*0620*/ @P0 BRA 0x4d0 ; /* 0xfffffea000000947 */ /* 0x000fea000383ffff */ /*0630*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0640*/ IADD3 R1, R1, -0x18, RZ ; /* 0xffffffe801017810 */ /* 0x000fca0007ffe0ff */ /*0650*/ STL [R1+0x10], R21 ; /* 0x0000101501007387 */ /* 0x000fe80000100800 */ /*0660*/ STL [R1+0xc], R20 ; /* 0x00000c1401007387 */ /* 0x000fe80000100800 */ /*0670*/ STL [R1+0x8], R17 ; /* 0x0000081101007387 */ /* 0x000fe80000100800 */ /*0680*/ STL [R1+0x4], R16 ; /* 0x0000041001007387 */ /* 0x0001e80000100800 */ /*0690*/ STL [R1], R2 ; /* 0x0000000201007387 */ /* 0x0003e20000100800 */ /*06a0*/ BMOV.32.CLEAR R16, B6 ; /* 0x0000000006107355 */ /* 0x001e240000100000 */ /*06b0*/ ISETP.GE.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */ /* 0x000fe20003f06270 */ /*06c0*/ BSSY B6, 0x7e0 ; /* 0x0000011000067945 */ /* 0x000fd80003800000 */ /*06d0*/ @!P0 BRA 0x730 ; /* 
0x0000005000008947 */ /* 0x000fea0003800000 */ /*06e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*06f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0700*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea4000c1e1900 */ /*0710*/ IMAD.IADD R4, R4, 0x1, R7 ; /* 0x0000000104047824 */ /* 0x004fe200078e0207 */ /*0720*/ BRA 0x7d0 ; /* 0x000000a000007947 */ /* 0x000fea0003800000 */ /*0730*/ IMAD.WIDE R2, R8, 0x4, R6 ; /* 0x0000000408027825 */ /* 0x002fe200078e0206 */ /*0740*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0750*/ IMAD.WIDE R10, R8, 0x4, R4 ; /* 0x00000004080a7825 */ /* 0x000fe400078e0204 */ /*0760*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000368000c1e1900 */ /*0770*/ LDG.E R17, [R10.64] ; /* 0x000000040a117981 */ /* 0x000362000c1e1900 */ /*0780*/ IMAD.MOV.U32 R8, RZ, RZ, R9 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0009 */ /*0790*/ MOV R20, 0x7c0 ; /* 0x000007c000147802 */ /* 0x000fe20000000f00 */ /*07a0*/ IMAD.MOV.U32 R21, RZ, RZ, 0x0 ; /* 0x00000000ff157424 */ /* 0x000fc800078e00ff */ /*07b0*/ CALL.REL.NOINC 0x640 ; /* 0xfffffe8000007944 */ /* 0x023fea0003c3ffff */ /*07c0*/ IADD3 R4, R4, R2, R17 ; /* 0x0000000204047210 */ /* 0x000fe40007ffe011 */ /*07d0*/ BSYNC B6 ; /* 0x0000000000067941 */ /* 0x000fea0003800000 */ /*07e0*/ LDL R20, [R1+0xc] ; /* 0x00000c0001147983 */ /* 0x0004e20000100800 */ /*07f0*/ BMOV.32 B6, R16 ; /* 0x0000001006007356 */ /* 0x0011e60000000000 */ /*0800*/ LDL R21, [R1+0x10] ; /* 0x0000100001157983 */ /* 0x0004e80000100800 */ /*0810*/ LDL R2, [R1] ; /* 0x0000000001027983 */ /* 0x0024e80000100800 */ /*0820*/ LDL R16, [R1+0x4] ; /* 0x0000040001107983 */ /* 0x0014e80000100800 */ /*0830*/ LDL R17, [R1+0x8] ; /* 0x0000080001117983 */ /* 0x0004e40000100800 */ /*0840*/ IADD3 R1, R1, 0x18, RZ ; /* 0x0000001801017810 */ /* 0x004fe20007ffe0ff */ /*0850*/ RET.REL.NODEC R20 0x0 ; /* 0xfffff7a014007950 */ /* 0x008fec0003c3ffff */ 
/*0860*/ BRA 0x860; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0870*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0880*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .p2align 2 .type _Z3addPiS_ii,@function _Z3addPiS_ii: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v5, 0 s_mov_b32 s0, 0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_2 .p2align 6 .LBB0_1: s_or_b32 exec_lo, exec_lo, s2 v_mov_b32_e32 v4, v6 s_and_b32 s2, exec_lo, s1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 s0, s2, s0 s_and_not1_b32 exec_lo, exec_lo, s0 s_cbranch_execz .LBB0_4 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) v_mov_b32_e32 v7, v5 s_or_b32 s1, s1, exec_lo s_mov_b32 s2, exec_lo v_cmpx_lt_i32_e64 v4, v6 s_cbranch_execz .LBB0_1 v_ashrrev_i32_e32 v5, 31, v4 s_and_not1_b32 s1, s1, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v8, vcc_lo, v0, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v9, vcc_lo, v1, v5, vcc_lo v_add_co_u32 v4, vcc_lo, v2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, v3, v5, vcc_lo flat_load_b32 v8, v[8:9] flat_load_b32 v4, v[4:5] s_waitcnt vmcnt(0) lgkmcnt(0) v_add3_u32 v5, v4, v8, v7 s_branch .LBB0_1 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_or_b32 exec_lo, exec_lo, s0 flat_load_b32 v0, v[0:1] flat_load_b32 v1, v[2:3] s_waitcnt vmcnt(0) lgkmcnt(0) v_add3_u32 v0, v1, v0, v7 s_setpc_b64 s[30:31] .Lfunc_end0: .size _Z3addPiS_ii, .Lfunc_end0-_Z3addPiS_ii .section .AMDGPU.csdata,"",@progbits .text .protected vecAdd .globl vecAdd .p2align 8 .type vecAdd,@function vecAdd: s_load_b64 s[12:13], s[0:1], 0x20 s_mov_b32 s32, 0 s_waitcnt lgkmcnt(0) s_cmp_eq_u64 s[12:13], 0 s_cbranch_scc1 .LBB1_5 s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b256 s[4:11], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s0, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[10:11], null, s15, s0, v[0:1] v_mov_b32_e32 v11, 0 v_lshlrev_b64 v[0:1], 2, v[10:11] v_cmp_gt_u64_e64 s3, s[10:11], v[10:11] s_delay_alu 
instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v11, vcc_lo, s8, v0 v_add_co_ci_u32_e32 v12, vcc_lo, s9, v1, vcc_lo s_branch .LBB1_3 .p2align 6 .LBB1_2: s_or_b32 exec_lo, exec_lo, s8 s_add_u32 s12, s12, -1 s_addc_u32 s13, s13, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u64 s[12:13], 0 s_cbranch_scc1 .LBB1_5 .LBB1_3: s_delay_alu instid0(VALU_DEP_3) s_and_saveexec_b32 s8, s3 s_cbranch_execz .LBB1_2 v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 v_dual_mov_b32 v4, v10 :: v_dual_mov_b32 v5, s10 s_getpc_b64 s[0:1] s_add_u32 s0, s0, _Z3addPiS_ii@rel32@lo+4 s_addc_u32 s1, s1, _Z3addPiS_ii@rel32@hi+12 s_delay_alu instid0(SALU_CYCLE_1) s_swappc_b64 s[30:31], s[0:1] global_store_b32 v[11:12], v0, off s_branch .LBB1_2 .LBB1_5: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel vecAdd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 1 .amdhsa_enable_private_segment 1 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 33 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 
.amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size vecAdd, .Lfunc_end1-vecAdd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: vecAdd .private_segment_fixed_size: 0 .sgpr_count: 35 .sgpr_spill_count: 0 .symbol: vecAdd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: true .vgpr_count: 13 
.vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000ddb70_00000000-6_vecAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z3addPiS_ii .type _Z3addPiS_ii, @function _Z3addPiS_ii: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z3addPiS_ii, .-_Z3addPiS_ii .globl _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm .type _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm, @function _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm: .LFB2052: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 152(%rsp), %rax subq %fs:40, %rax jne .L10 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq vecAdd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp 
.L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm, .-_Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm .globl vecAdd .type vecAdd, @function vecAdd: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z6vecAddPiS_S_mmPiS_S_mm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size vecAdd, .-vecAdd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "vecAdd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq vecAdd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "vecAdd.hip" .globl __device_stub__vecAdd # -- Begin function __device_stub__vecAdd .p2align 4, 0x90 .type __device_stub__vecAdd,@function __device_stub__vecAdd: # @__device_stub__vecAdd .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $vecAdd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size __device_stub__vecAdd, .Lfunc_end0-__device_stub__vecAdd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $vecAdd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type vecAdd,@object # @vecAdd .section .rodata,"a",@progbits .globl vecAdd .p2align 3, 0x0 vecAdd: .quad __device_stub__vecAdd .size vecAdd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "vecAdd" .size .L__unnamed_1, 7 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__vecAdd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym vecAdd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>

// Abort with file/line context on any failing CUDA runtime call.
// Kernel launches themselves return no status; their launch errors are
// surfaced via cudaGetLastError() (printed below, as in the original demos).
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Increment one element per thread.  No bounds check: the caller must launch
// exactly one thread per element (demo1 launches 1024*512 == 512*1024).
__global__ void kernel1(int* d_data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    d_data[tid] += 1;
}

// Naive partitioning: each thread increments a contiguous chunk of
// numElement/nthread elements.
// NOTE: the integer division truncates, so the tail (numElement % nthread
// elements) is skipped when numElement is not a multiple of the thread
// count; the demos only pass power-of-two sizes, where the division is
// exact.  At each loop step adjacent threads touch addresses a full chunk
// apart, so global-memory accesses are uncoalesced — this kernel exists to
// be the slow baseline against kernel2_opt.
__global__ void kernel2(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    const int numElementPerThread = numElement / nthread;
    const int start = tid * numElementPerThread;
    int end = start + numElementPerThread;
    for (int i = start; i < end; i++) {
        d_data[i] += 1;
    }
}

// Grid-stride loop: adjacent threads touch adjacent elements (coalesced),
// and every element is covered regardless of divisibility.
__global__ void kernel2_opt(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    for (int i = tid; i < numElement; i += nthread) {
        d_data[i] += 1;
    }
}

// Times kernel1 over a fixed 512K-element buffer and verifies the result
// against a CPU-computed gold array.
void demo1() {
    const int numElement = 512 * 1024;  // == 1024 blocks * 512 threads, one element per thread
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold   = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i] + 1;
    }

    int* d_data;
    CUDA_CHECK(cudaMalloc(&d_data, sizeof(int) * numElement));
    CUDA_CHECK(cudaMemcpy(d_data, h_data, sizeof(int) * numElement,
                          cudaMemcpyHostToDevice));

    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    CUDA_CHECK(cudaEventRecord(start, 0));
    kernel1<<<1024, 512>>>(d_data);
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));

    float elapsedTime;
    CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("Kernel elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel1: %s\n", cudaGetErrorString(cudaGetLastError()));

    CUDA_CHECK(cudaMemcpy(h_data, d_data, sizeof(int) * numElement,
                          cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(d_data));
    CUDA_CHECK(cudaEventDestroy(start));  // fix: events were never destroyed
    CUDA_CHECK(cudaEventDestroy(stop));

    bool pass = true;
    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED.\n");
            pass = false;
            break;
        }
    }
    if (pass) {
        printf("Test pass...\n");
    }
    // fix: the original leaked h_data and gold on the failure path
    // (early return before free()).
    free(h_data);
    free(gold);
}

// Benchmarks CPU increment vs kernel2 (chunked) vs kernel2_opt (grid-stride)
// over numElement ints, then verifies kernel2_opt's output against the CPU
// gold array.  numElement is expected to be a power-of-two multiple of
// 1024*512 so that kernel2's truncating partition covers every element.
void demo2(const int numElement) {
    printf("numElement = %d\n", numElement);
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold   = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i];
    }

    int* d_data;
    CUDA_CHECK(cudaMalloc(&d_data, sizeof(int) * numElement));
    CUDA_CHECK(cudaMemcpy(d_data, h_data, sizeof(int) * numElement,
                          cudaMemcpyHostToDevice));

    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    /* CPU reference (timed with the same CUDA events for comparability) */
    CUDA_CHECK(cudaEventRecord(start, 0));
    for (int i = 0; i < numElement; i++) {
        gold[i] += 1;
    }
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("CPU elapsed time: %.3f ms\n", elapsedTime);

    /* GPU method 1: contiguous per-thread chunks (uncoalesced) */
    CUDA_CHECK(cudaEventRecord(start, 0));
    kernel2<<<1024, 512>>>(d_data, numElement);
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("kernel2 elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));

    /* GPU method 2: grid-stride (coalesced).  Re-upload the untouched input
       so kernel2_opt starts from the same data kernel2 did. */
    CUDA_CHECK(cudaMemcpy(d_data, h_data, sizeof(int) * numElement,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaEventRecord(start, 0));
    kernel2_opt<<<1024, 512>>>(d_data, numElement);
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime);
    // fix: this status line was mislabeled "kernel2:" (copy-paste bug)
    // although it reports kernel2_opt's launch status.
    printf("kernel2_opt: %s\n", cudaGetErrorString(cudaGetLastError()));

    CUDA_CHECK(cudaMemcpy(h_data, d_data, sizeof(int) * numElement,
                          cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(d_data));
    CUDA_CHECK(cudaEventDestroy(start));  // fix: events were never destroyed
    CUDA_CHECK(cudaEventDestroy(stop));

    bool pass = true;
    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n",
                   i, h_data[i], gold[i]);
            pass = false;
            break;
        }
    }
    if (pass) {
        printf("Test pass...\n");
    }
    // fix: the original leaked h_data and gold on the failure path.
    free(h_data);
    free(gold);
}

int main() {
    int numElement = 1 * 1024 * 1024;
    demo2(numElement);  // execute once to warm up for performance measurement
    printf("\n\nstart ............................................\n");
    printf("demo2 started!\n");
    for (int i = numElement; i <= 32 * 1024 * 1024; i *= 2) {
        demo2(i);
        printf("\n");
    }
    printf("demo1 started!\n");
    demo1();
    return EXIT_SUCCESS;
}
code for sm_80 Function : _Z11kernel2_optPii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ BSSY B0, 0x2f0 ; /* 0x0000026000007945 */ /* 0x000fe40003800000 */ /*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */ /* 0x000fc800078e02ff */ /*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */ /* 0x000e220000209000 */ /*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0a00 */ /*00c0*/ ISETP.NE.U32.AND P2, PT, R0.reuse, RZ, PT ; /* 0x000000ff0000720c */ /* 0x040fe20003f45070 */ /*00d0*/ IMAD.IADD R2, R0, 0x1, R3 ; /* 0x0000000100027824 */ /* 0x000fca00078e0203 */ /*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */ /* 0x000fc800078e33ff */ /*00f0*/ IADD3 R7, R7, c[0x0][0x168], R0 ; /* 0x00005a0007077a10 */ /* 0x000fe20007ffe000 */ /*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */ /* 0x001e240000001000 */ /*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */ /* 0x001fcc0007ffe0ff */ /*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ /*0130*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x001fe400078e00ff */ /*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */ /* 0x002fc800078e02ff */ 
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */ /* 0x000fcc00078e0004 */ /*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */ /* 0x000fc800078e00ff */ /*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */ /* 0x000fc800078e0a02 */ /*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */ /* 0x000fca00078e0207 */ /*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f06070 */ /*01a0*/ @P0 IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107070824 */ /* 0x000fe200078e0a00 */ /*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */ /* 0x000fc80007ffe0ff */ /*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f26070 */ /*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */ /* 0x000fe40007ffe0ff */ /*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */ /* 0x000fc800078e33ff */ /*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */ /* 0x040fe40007ffe0ff */ /*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f26070 */ /*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fda000780c0ff */ /*0220*/ @!P0 BRA 0x2e0 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0230*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */ /* 0x000fe400078e00ff */ /*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */ /* 0x000fe400078e0004 */ /*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0206 */ /*0260*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000ea2000c1e1900 */ /*0270*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe20007ffe0ff */ /*0280*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */ /* 0x000fc600078e0203 */ /*0290*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe40003f05270 */ /*02a0*/ IADD3 R7, R6, 0x1, RZ ; /* 0x0000000106077810 */ /* 0x004fca0007ffe0ff */ /*02b0*/ STG.E [R4.64], R7 ; 
/* 0x0000000704007986 */ /* 0x0001e4000c101904 */ /*02c0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */ /* 0x001fc800078e0204 */ /*02d0*/ @P0 BRA 0x260 ; /* 0xffffff8000000947 */ /* 0x000fea000383ffff */ /*02e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*02f0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*0300*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x001fc800078e00ff */ /*0310*/ IMAD.WIDE R4, R3, R4, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0204 */ /*0320*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */ /* 0x000ea2000c1e1900 */ /*0330*/ IMAD.WIDE R6, R0, 0x4, R4 ; /* 0x0000000400067825 */ /* 0x000fe200078e0204 */ /*0340*/ IADD3 R13, R2, 0x1, RZ ; /* 0x00000001020d7810 */ /* 0x004fca0007ffe0ff */ /*0350*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0001e8000c101904 */ /*0360*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1900 */ /*0370*/ IMAD.WIDE R8, R0, 0x4, R6 ; /* 0x0000000400087825 */ /* 0x000fe200078e0206 */ /*0380*/ IADD3 R15, R2, 0x1, RZ ; /* 0x00000001020f7810 */ /* 0x004fca0007ffe0ff */ /*0390*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001e8000c101904 */ /*03a0*/ LDG.E R2, [R8.64] ; /* 0x0000000408027981 */ /* 0x000ea2000c1e1900 */ /*03b0*/ IMAD.WIDE R10, R0, 0x4, R8 ; /* 0x00000004000a7825 */ /* 0x000fe200078e0208 */ /*03c0*/ IADD3 R17, R2, 0x1, RZ ; /* 0x0000000102117810 */ /* 0x004fca0007ffe0ff */ /*03d0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */ /* 0x0001e8000c101904 */ /*03e0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */ /* 0x000ea2000c1e1900 */ /*03f0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0400*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0410*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */ /* 0x000fe40003f06270 */ /*0420*/ IADD3 R19, R2, 0x1, RZ ; /* 0x0000000102137810 */ /* 0x004fca0007ffe0ff */ /*0430*/ STG.E [R10.64], R19 ; /* 
0x000000130a007986 */ /* 0x0001ec000c101904 */ /*0440*/ @!P0 BRA 0x300 ; /* 0xfffffeb000008947 */ /* 0x000fea000383ffff */ /*0450*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0460*/ BRA 0x460; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0480*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0490*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z7kernel2Pii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */ /* 0x000fe200078e00ff */ /*0020*/ IABS R8, c[0x0][0x168] ; /* 0x00005a0000087a13 */ /* 0x000fc60000000000 */ /*0030*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */ /* 0x000fca00078e02ff */ /*0040*/ IABS R5, R0.reuse ; /* 0x0000000000057213 */ /* 0x080fe40000000000 */ /*0050*/ IABS R9, R0 ; /* 0x0000000000097213 */ /* 0x000fe40000000000 */ /*0060*/ I2F.RP R4, R5 ; /* 0x0000000500047306 */ /* 0x000e300000209400 */ /*0070*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */ /* 0x001e240000001000 */ /*0080*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */ /* 0x001fe20007ffe0ff */ /*0090*/ IMAD.MOV R4, RZ, RZ, -R9 ; /* 0x000000ffff047224 */ /* 0x000fca00078e0a09 */ /*00a0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */ /* 0x000064000021f000 */ /*00b0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */ /* 
0x001fe400078e00ff */ /*00c0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */ /* 0x002fc800078e0a03 */ /*00d0*/ IMAD R7, R6, R5, RZ ; /* 0x0000000506077224 */ /* 0x000fe400078e02ff */ /*00e0*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */ /* 0x000fe400078e0008 */ /*00f0*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */ /* 0x000fcc00078e0002 */ /*0100*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */ /* 0x000fc800078e00ff */ /*0110*/ IMAD R2, R3, R4, R6 ; /* 0x0000000403027224 */ /* 0x000fca00078e0206 */ /*0120*/ ISETP.GT.U32.AND P2, PT, R5, R2, PT ; /* 0x000000020500720c */ /* 0x000fda0003f44070 */ /*0130*/ @!P2 IMAD.IADD R2, R2, 0x1, -R5 ; /* 0x000000010202a824 */ /* 0x000fe200078e0a05 */ /*0140*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */ /* 0x000fe40007ffe0ff */ /*0150*/ ISETP.NE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe40003f45270 */ /*0160*/ ISETP.GE.U32.AND P0, PT, R2, R5, PT ; /* 0x000000050200720c */ /* 0x000fe40003f06070 */ /*0170*/ LOP3.LUT R2, R0, c[0x0][0x168], RZ, 0x3c, !PT ; /* 0x00005a0000027a12 */ /* 0x000fe200078e3cff */ /*0180*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e260000002100 */ /*0190*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fc40003f26270 */ /*01a0*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e2a0000002500 */ /*01b0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */ /* 0x000fcc0007ffe0ff */ /*01c0*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */ /* 0x000fe200078e0a03 */ /*01d0*/ @!P2 LOP3.LUT R3, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff03a212 */ /* 0x000fc800078e33ff */ /*01e0*/ ISETP.GE.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fe20003f06270 */ /*01f0*/ IMAD R0, R2, c[0x0][0x0], R5 ; /* 0x0000000002007a24 */ /* 0x001fd800078e0205 */ /*0200*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0210*/ IMAD R5, R0, R3, RZ ; /* 0x0000000300057224 */ /* 0x000fe200078e02ff */ /*0220*/ ULDC.64 UR4, 
c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0230*/ BSSY B0, 0x3d0 ; /* 0x0000019000007945 */ /* 0x000fe40003800000 */ /*0240*/ IMAD.IADD R0, R3, 0x1, R5 ; /* 0x0000000103007824 */ /* 0x000fe200078e0205 */ /*0250*/ IADD3 R3, R5, 0x1, RZ ; /* 0x0000000105037810 */ /* 0x000fc80007ffe0ff */ /*0260*/ IMNMX R4, R0, R3, !PT ; /* 0x0000000300047217 */ /* 0x000fe40007800200 */ /*0270*/ LOP3.LUT R3, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff037212 */ /* 0x000fc600078e33ff */ /*0280*/ IMAD.IADD R2, R4.reuse, 0x1, -R5 ; /* 0x0000000104027824 */ /* 0x040fe400078e0a05 */ /*0290*/ IMAD.IADD R3, R4, 0x1, R3 ; /* 0x0000000104037824 */ /* 0x000fc600078e0203 */ /*02a0*/ LOP3.LUT P1, R4, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302047812 */ /* 0x000fe4000782c0ff */ /*02b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */ /* 0x000fd60003f06070 */ /*02c0*/ @!P1 BRA 0x3c0 ; /* 0x000000f000009947 */ /* 0x000fea0003800000 */ /*02d0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */ /* 0x000fc800078e00ff */ /*02e0*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fc800078e0202 */ /*02f0*/ IMAD.MOV.U32 R6, RZ, RZ, R2 ; /* 0x000000ffff067224 */ /* 0x000fe400078e0002 */ /*0300*/ IMAD.MOV.U32 R9, RZ, RZ, R3 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0003 */ /*0310*/ IMAD.MOV.U32 R2, RZ, RZ, R6 ; /* 0x000000ffff027224 */ /* 0x001fe400078e0006 */ /*0320*/ IMAD.MOV.U32 R3, RZ, RZ, R9 ; /* 0x000000ffff037224 */ /* 0x000fca00078e0009 */ /*0330*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */ /* 0x000ea2000c1e1900 */ /*0340*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*0350*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */ /* 0x000fe40007ffe0ff */ /*0360*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe40003f25270 */ /*0370*/ IADD3 R7, R6, 0x1, RZ ; /* 0x0000000106077810 */ /* 0x004fe40007ffe0ff */ /*0380*/ IADD3 R6, P2, R2, 0x4, RZ ; /* 0x0000000402067810 */ /* 0x000fc60007f5e0ff */ 
/*0390*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0001e4000c101904 */ /*03a0*/ IMAD.X R9, RZ, RZ, R3, P2 ; /* 0x000000ffff097224 */ /* 0x000fc800010e0603 */ /*03b0*/ @P1 BRA 0x310 ; /* 0xffffff5000001947 */ /* 0x000fea000383ffff */ /*03c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*03d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*03e0*/ IMAD.IADD R4, R0, 0x1, -R5 ; /* 0x0000000100047824 */ /* 0x000fe200078e0a05 */ /*03f0*/ BSSY B0, 0x810 ; /* 0x0000041000007945 */ /* 0x000fe20003800000 */ /*0400*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */ /* 0x001fc600078e00ff */ /*0410*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fe20003f24270 */ /*0420*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0430*/ IADD3 R2, P0, R2, 0x8, RZ ; /* 0x0000000802027810 */ /* 0x000fca0007f1e0ff */ /*0440*/ IMAD.X R3, RZ, RZ, R3, P0 ; /* 0x000000ffff037224 */ /* 0x000fe200000e0603 */ /*0450*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0f070 */ /*0460*/ @!P1 BRA 0x800 ; /* 0x0000039000009947 */ /* 0x000fee0003800000 */ /*0470*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0480*/ IADD3 R4, R0, -0xc, RZ ; /* 0xfffffff400047810 */ /* 0x000fc60007ffe0ff */ /*0490*/ LDG.E R16, [R2.64] ; /* 0x0000000402107981 */ /* 0x000ea8000c1e1900 */ /*04a0*/ LDG.E R13, [R2.64+-0x8] ; /* 0xfffff804020d7981 */ /* 0x000ee8000c1e1900 */ /*04b0*/ LDG.E R14, [R2.64+-0x4] ; /* 0xfffffc04020e7981 */ /* 0x000f28000c1e1900 */ /*04c0*/ LDG.E R18, [R2.64+0x4] ; /* 0x0000040402127981 */ /* 0x000f68000c1e1900 */ /*04d0*/ LDG.E R11, [R2.64+0x20] ; /* 0x00002004020b7981 */ /* 0x000f68000c1e1900 */ /*04e0*/ LDG.E R6, [R2.64+0x28] ; /* 0x0000280402067981 */ /* 0x000f68000c1e1900 */ /*04f0*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */ /* 0x000f68000c1e1900 */ /*0500*/ LDG.E R22, [R2.64+0xc] ; /* 
0x00000c0402167981 */ /* 0x000f68000c1e1900 */ /*0510*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */ /* 0x000f68000c1e1900 */ /*0520*/ LDG.E R25, [R2.64+0x14] ; /* 0x0000140402197981 */ /* 0x000f68000c1e1900 */ /*0530*/ LDG.E R26, [R2.64+0x18] ; /* 0x00001804021a7981 */ /* 0x000f68000c1e1900 */ /*0540*/ LDG.E R12, [R2.64+0x1c] ; /* 0x00001c04020c7981 */ /* 0x000f68000c1e1900 */ /*0550*/ LDG.E R10, [R2.64+0x24] ; /* 0x00002404020a7981 */ /* 0x000f68000c1e1900 */ /*0560*/ LDG.E R9, [R2.64+0x2c] ; /* 0x00002c0402097981 */ /* 0x000f68000c1e1900 */ /*0570*/ LDG.E R8, [R2.64+0x30] ; /* 0x0000300402087981 */ /* 0x000f68000c1e1900 */ /*0580*/ LDG.E R7, [R2.64+0x34] ; /* 0x0000340402077981 */ /* 0x000f62000c1e1900 */ /*0590*/ IADD3 R5, R5, 0x10, RZ ; /* 0x0000001005057810 */ /* 0x000fc80007ffe0ff */ /*05a0*/ ISETP.GE.AND P1, PT, R5, R4, PT ; /* 0x000000040500720c */ /* 0x000fe40003f26270 */ /*05b0*/ IADD3 R17, R16, 0x1, RZ ; /* 0x0000000110117810 */ /* 0x004fe40007ffe0ff */ /*05c0*/ IADD3 R13, R13, 0x1, RZ ; /* 0x000000010d0d7810 */ /* 0x008fc60007ffe0ff */ /*05d0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */ /* 0x0001e2000c101904 */ /*05e0*/ IADD3 R15, R14, 0x1, RZ ; /* 0x000000010e0f7810 */ /* 0x010fe40007ffe0ff */ /*05f0*/ IADD3 R19, R18, 0x1, RZ ; /* 0x0000000112137810 */ /* 0x020fe20007ffe0ff */ /*0600*/ STG.E [R2.64+-0x8], R13 ; /* 0xfffff80d02007986 */ /* 0x0003e2000c101904 */ /*0610*/ IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0b7810 */ /* 0x000fc60007ffe0ff */ /*0620*/ STG.E [R2.64+-0x4], R15 ; /* 0xfffffc0f02007986 */ /* 0x000fe2000c101904 */ /*0630*/ IADD3 R17, R6, 0x1, RZ ; /* 0x0000000106117810 */ /* 0x001fe40007ffe0ff */ /*0640*/ IADD3 R6, P2, R2, 0x40, RZ ; /* 0x0000004002067810 */ /* 0x000fe20007f5e0ff */ /*0650*/ STG.E [R2.64+0x4], R19 ; /* 0x0000041302007986 */ /* 0x000fe2000c101904 */ /*0660*/ IADD3 R21, R20, 0x1, RZ ; /* 0x0000000114157810 */ /* 0x000fc60007ffe0ff */ /*0670*/ STG.E [R2.64+0x20], R11 ; /* 0x0000200b02007986 */ /* 0x0001e2000c101904 
*/ /*0680*/ IADD3 R23, R22, 0x1, RZ ; /* 0x0000000116177810 */ /* 0x000fe40007ffe0ff */ /*0690*/ IADD3 R13, R24, 0x1, RZ ; /* 0x00000001180d7810 */ /* 0x002fe40007ffe0ff */ /*06a0*/ IADD3 R25, R25, 0x1, RZ ; /* 0x0000000119197810 */ /* 0x000fe40007ffe0ff */ /*06b0*/ IADD3 R27, R26, 0x1, RZ ; /* 0x000000011a1b7810 */ /* 0x000fe20007ffe0ff */ /*06c0*/ IMAD.X R11, RZ, RZ, R3, P2 ; /* 0x000000ffff0b7224 */ /* 0x001fe200010e0603 */ /*06d0*/ IADD3 R29, R12, 0x1, RZ ; /* 0x000000010c1d7810 */ /* 0x000fe40007ffe0ff */ /*06e0*/ IADD3 R15, R10, 0x1, RZ ; /* 0x000000010a0f7810 */ /* 0x000fc40007ffe0ff */ /*06f0*/ IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109097810 */ /* 0x000fe40007ffe0ff */ /*0700*/ IADD3 R19, R8, 0x1, RZ ; /* 0x0000000108137810 */ /* 0x000fe40007ffe0ff */ /*0710*/ IADD3 R7, R7, 0x1, RZ ; /* 0x0000000107077810 */ /* 0x000fe20007ffe0ff */ /*0720*/ STG.E [R2.64+0x8], R21 ; /* 0x0000081502007986 */ /* 0x000fe8000c101904 */ /*0730*/ STG.E [R2.64+0xc], R23 ; /* 0x00000c1702007986 */ /* 0x000fe8000c101904 */ /*0740*/ STG.E [R2.64+0x10], R13 ; /* 0x0000100d02007986 */ /* 0x000fe8000c101904 */ /*0750*/ STG.E [R2.64+0x14], R25 ; /* 0x0000141902007986 */ /* 0x000fe8000c101904 */ /*0760*/ STG.E [R2.64+0x18], R27 ; /* 0x0000181b02007986 */ /* 0x000fe8000c101904 */ /*0770*/ STG.E [R2.64+0x1c], R29 ; /* 0x00001c1d02007986 */ /* 0x000fe8000c101904 */ /*0780*/ STG.E [R2.64+0x24], R15 ; /* 0x0000240f02007986 */ /* 0x000fe8000c101904 */ /*0790*/ STG.E [R2.64+0x28], R17 ; /* 0x0000281102007986 */ /* 0x000fe8000c101904 */ /*07a0*/ STG.E [R2.64+0x2c], R9 ; /* 0x00002c0902007986 */ /* 0x000fe8000c101904 */ /*07b0*/ STG.E [R2.64+0x30], R19 ; /* 0x0000301302007986 */ /* 0x000fe8000c101904 */ /*07c0*/ STG.E [R2.64+0x34], R7 ; /* 0x0000340702007986 */ /* 0x0001e4000c101904 */ /*07d0*/ IMAD.MOV.U32 R2, RZ, RZ, R6 ; /* 0x000000ffff027224 */ /* 0x001fc400078e0006 */ /*07e0*/ IMAD.MOV.U32 R3, RZ, RZ, R11 ; /* 0x000000ffff037224 */ /* 0x000fe200078e000b */ /*07f0*/ @!P1 BRA 0x490 ; /* 
0xfffffc9000009947 */ /* 0x000fea000383ffff */ /*0800*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0810*/ IMAD.IADD R4, R0, 0x1, -R5 ; /* 0x0000000100047824 */ /* 0x000fe200078e0a05 */ /*0820*/ BSSY B0, 0xa40 ; /* 0x0000021000007945 */ /* 0x000fe80003800000 */ /*0830*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */ /* 0x000fda0003f24270 */ /*0840*/ @!P1 BRA 0xa30 ; /* 0x000001e000009947 */ /* 0x000fea0003800000 */ /*0850*/ LDG.E R4, [R2.64+-0x8] ; /* 0xfffff80402047981 */ /* 0x000ea8000c1e1900 */ /*0860*/ LDG.E R6, [R2.64+-0x4] ; /* 0xfffffc0402067981 */ /* 0x000ee8000c1e1900 */ /*0870*/ LDG.E R8, [R2.64] ; /* 0x0000000402087981 */ /* 0x000f28000c1e1900 */ /*0880*/ LDG.E R10, [R2.64+0x4] ; /* 0x00000404020a7981 */ /* 0x000f68000c1e1900 */ /*0890*/ LDG.E R12, [R2.64+0x8] ; /* 0x00000804020c7981 */ /* 0x000f68000c1e1900 */ /*08a0*/ LDG.E R14, [R2.64+0xc] ; /* 0x00000c04020e7981 */ /* 0x000f68000c1e1900 */ /*08b0*/ LDG.E R16, [R2.64+0x10] ; /* 0x0000100402107981 */ /* 0x000f68000c1e1900 */ /*08c0*/ LDG.E R18, [R2.64+0x14] ; /* 0x0000140402127981 */ /* 0x000f62000c1e1900 */ /*08d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40003f0e170 */ /*08e0*/ IADD3 R5, R5, 0x8, RZ ; /* 0x0000000805057810 */ /* 0x000fe40007ffe0ff */ /*08f0*/ IADD3 R7, R4, 0x1, RZ ; /* 0x0000000104077810 */ /* 0x004fe40007ffe0ff */ /*0900*/ IADD3 R4, P1, R2, 0x20, RZ ; /* 0x0000002002047810 */ /* 0x000fe40007f3e0ff */ /*0910*/ IADD3 R9, R6, 0x1, RZ ; /* 0x0000000106097810 */ /* 0x008fe20007ffe0ff */ /*0920*/ STG.E [R2.64+-0x8], R7 ; /* 0xfffff80702007986 */ /* 0x0001e2000c101904 */ /*0930*/ IADD3 R11, R8, 0x1, RZ ; /* 0x00000001080b7810 */ /* 0x010fc60007ffe0ff */ /*0940*/ STG.E [R2.64+-0x4], R9 ; /* 0xfffffc0902007986 */ /* 0x000fe2000c101904 */ /*0950*/ IADD3 R13, R10, 0x1, RZ ; /* 0x000000010a0d7810 */ /* 0x020fc60007ffe0ff */ /*0960*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */ /* 0x000fe2000c101904 */ /*0970*/ IADD3 R15, R12, 
0x1, RZ ; /* 0x000000010c0f7810 */ /* 0x000fe20007ffe0ff */ /*0980*/ IMAD.X R7, RZ, RZ, R3, P1 ; /* 0x000000ffff077224 */ /* 0x001fe400008e0603 */ /*0990*/ STG.E [R2.64+0x4], R13 ; /* 0x0000040d02007986 */ /* 0x000fe2000c101904 */ /*09a0*/ IADD3 R17, R14, 0x1, RZ ; /* 0x000000010e117810 */ /* 0x000fc60007ffe0ff */ /*09b0*/ STG.E [R2.64+0x8], R15 ; /* 0x0000080f02007986 */ /* 0x000fe2000c101904 */ /*09c0*/ IADD3 R19, R16, 0x1, RZ ; /* 0x0000000110137810 */ /* 0x000fc60007ffe0ff */ /*09d0*/ STG.E [R2.64+0xc], R17 ; /* 0x00000c1102007986 */ /* 0x000fe2000c101904 */ /*09e0*/ IADD3 R21, R18, 0x1, RZ ; /* 0x0000000112157810 */ /* 0x000fc60007ffe0ff */ /*09f0*/ STG.E [R2.64+0x10], R19 ; /* 0x0000101302007986 */ /* 0x000fe8000c101904 */ /*0a00*/ STG.E [R2.64+0x14], R21 ; /* 0x0000141502007986 */ /* 0x0001e4000c101904 */ /*0a10*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */ /* 0x001fe400078e0004 */ /*0a20*/ IMAD.MOV.U32 R3, RZ, RZ, R7 ; /* 0x000000ffff037224 */ /* 0x000fe400078e0007 */ /*0a30*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0a40*/ ISETP.LT.OR P0, PT, R5, R0, P0 ; /* 0x000000000500720c */ /* 0x000fda0000701670 */ /*0a50*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0a60*/ LDG.E R0, [R2.64+-0x8] ; /* 0xfffff80402007981 */ /* 0x000ea8000c1e1900 */ /*0a70*/ LDG.E R4, [R2.64+-0x4] ; /* 0xfffffc0402047981 */ /* 0x000ee8000c1e1900 */ /*0a80*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */ /* 0x000f28000c1e1900 */ /*0a90*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040402087981 */ /* 0x000f62000c1e1900 */ /*0aa0*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */ /* 0x004fc40007ffe0ff */ /*0ab0*/ IADD3 R7, R4, 0x1, RZ ; /* 0x0000000104077810 */ /* 0x008fc60007ffe0ff */ /*0ac0*/ STG.E [R2.64+-0x8], R5 ; /* 0xfffff80502007986 */ /* 0x000fe2000c101904 */ /*0ad0*/ IADD3 R9, R6, 0x1, RZ ; /* 0x0000000106097810 */ /* 0x010fc60007ffe0ff */ /*0ae0*/ STG.E [R2.64+-0x4], R7 ; /* 0xfffffc0702007986 */ /* 0x000fe2000c101904 */ /*0af0*/ IADD3 
R11, R8, 0x1, RZ ; /* 0x00000001080b7810 */ /* 0x020fc60007ffe0ff */ /*0b00*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe8000c101904 */ /*0b10*/ STG.E [R2.64+0x4], R11 ; /* 0x0000040b02007986 */ /* 0x000fe2000c101904 */ /*0b20*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0b30*/ BRA 0xb30; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0b40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0b90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ba0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0be0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z7kernel1Pi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0060*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0205 */ /*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea4000c1e1900 */ /*0080*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */ /* 0x004fca0007ffe0ff */ /*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h>
#include <stdlib.h>

// One element per thread, no tail guard: the launch configuration must
// satisfy gridDim.x * blockDim.x == number of elements (the demos launch
// 1024 blocks x 512 threads for 512K+ element arrays that divide exactly).
__global__ void kernel1(int* d_data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    d_data[tid] += 1;
}

// Chunked partitioning: each thread increments a contiguous slice of
// numElement / nthread elements, so adjacent threads touch addresses far
// apart (uncoalesced) — this is the deliberately slow demo kernel.
// NOTE: any remainder when numElement is not divisible by the thread count
// is left untouched; the demo only uses divisible sizes (>= 1M, powers of 2).
__global__ void kernel2(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    const int numElementPerThread = numElement / nthread;
    const int start = tid * numElementPerThread;
    int end = start + numElementPerThread;
    for (int i = start; i < end; i++) {
        d_data[i] += 1;
    }
}

// Grid-stride loop: adjacent threads touch adjacent elements (coalesced),
// correct for any grid size and any numElement.
__global__ void kernel2_opt(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    for (int i = tid; i < numElement; i += nthread) {
        d_data[i] += 1;
    }
}

// Times kernel1 over 512K elements and verifies against a CPU reference.
// Fixes vs. original: the failure path no longer leaks h_data/gold, and the
// CUDA events are destroyed before returning.
void demo1() {
    const int numElement = 512 * 1024;
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i] + 1;
    }

    int* d_data;
    cudaMalloc(&d_data, sizeof(int) * numElement);
    cudaMemcpy(d_data, h_data, sizeof(int) * numElement, cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    kernel1<<<1024, 512>>>(d_data);  // 1024 * 512 == numElement exactly
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Kernel elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel1: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaMemcpy(h_data, d_data, sizeof(int) * numElement, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    cudaEventDestroy(start);  // fixed: events were never destroyed
    cudaEventDestroy(stop);

    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED.\n");
            free(h_data);  // fixed: was leaked on the error path
            free(gold);
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

// Benchmarks a CPU increment loop, kernel2 (chunked) and kernel2_opt
// (grid-stride) over numElement ints, then verifies against a CPU reference.
// The CPU loop is timed with CUDA events too, for a rough comparison.
// Fixes vs. original: no leak of h_data/gold on the failure path, and the
// CUDA events are destroyed before returning.
void demo2(const int numElement) {
    printf("numElement = %d\n", numElement);
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i];
    }

    int* d_data;
    cudaMalloc(&d_data, sizeof(int) * numElement);
    cudaMemcpy(d_data, h_data, sizeof(int) * numElement, cudaMemcpyHostToDevice);

    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* CPU reference (also produces the expected output in gold[]) */
    cudaEventRecord(start, 0);
    for (int i = 0; i < numElement; i++) {
        gold[i] += 1;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("CPU elapsed time: %.3f ms\n", elapsedTime);

    /* GPU method 1: chunked partitioning (uncoalesced) */
    cudaEventRecord(start, 0);
    kernel2<<<1024, 512>>>(d_data, numElement);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2 elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));

    /* GPU method 2: grid-stride loop; reset the input first so both
       kernels start from the same data */
    cudaMemcpy(d_data, h_data, sizeof(int) * numElement, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    kernel2_opt<<<1024, 512>>>(d_data, numElement);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaMemcpy(h_data, d_data, sizeof(int) * numElement, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    cudaEventDestroy(start);  // fixed: events were never destroyed
    cudaEventDestroy(stop);

    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]);
            free(h_data);  // fixed: was leaked on the error path
            free(gold);
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

int main() {
    int numElement = 1 * 1024 * 1024;
    demo2(numElement);  // execute once to warm up for performance measurement
    printf("\n\nstart ............................................\n");
    printf("demo2 started!\n");
    for (int i = numElement; i <= 32 * 1024 * 1024; i *= 2) {
        demo2(i);
        printf("\n");
    }
    printf("demo1 started!\n");
    demo1();
    return EXIT_SUCCESS;
}
.file "tmpxft_0001852a_00000000-6_demo2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z7kernel1PiPi .type _Z26__device_stub__Z7kernel1PiPi, @function _Z26__device_stub__Z7kernel1PiPi: .LFB2084: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z7kernel1Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z26__device_stub__Z7kernel1PiPi, .-_Z26__device_stub__Z7kernel1PiPi .globl _Z7kernel1Pi .type _Z7kernel1Pi, @function _Z7kernel1Pi: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z7kernel1PiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z7kernel1Pi, .-_Z7kernel1Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Kernel elapsed time: %.3f ms\n" .LC1: .string "kernel1: %s\n" .LC2: .string "!!!ERROR, TEST FAILED.\n" .LC3: .string "Test 
pass...\n" .text .globl _Z5demo1v .type _Z5demo1v, @function _Z5demo1v: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $2097152, %edi call malloc@PLT movq %rax, %rbp movl $2097152, %edi call malloc@PLT movq %rax, %r12 movl $0, %ebx .L12: call rand@PLT movl %eax, 0(%rbp,%rbx) addl $1, %eax movl %eax, (%r12,%rbx) addq $4, %rbx cmpq $2097152, %rbx jne .L12 leaq 8(%rsp), %rdi movl $2097152, %esi call cudaMalloc@PLT movl $1, %ecx movl $2097152, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT leaq 16(%rsp), %rdi call cudaEventCreate@PLT leaq 24(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 16(%rsp), %rdi call cudaEventRecord@PLT movl $512, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1024, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L21 .L13: movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movq 24(%rsp), %rdi call cudaEventSynchronize@PLT leaq 44(%rsp), %rdi movq 24(%rsp), %rdx movq 16(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 44(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movl $2097152, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl $0, %eax .L16: movl (%r12,%rax), %ecx cmpl %ecx, 0(%rbp,%rax) jne .L22 addq $4, %rax cmpq $2097152, %rax jne .L16 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rbp, %rdi call free@PLT movq %r12, %rdi call 
free@PLT jmp .L11 .L21: movq 8(%rsp), %rdi call _Z26__device_stub__Z7kernel1PiPi jmp .L13 .L22: leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L11: movq 56(%rsp), %rax subq %fs:40, %rax jne .L23 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z5demo1v, .-_Z5demo1v .globl _Z27__device_stub__Z7kernel2PiiPii .type _Z27__device_stub__Z7kernel2PiiPii, @function _Z27__device_stub__Z7kernel2PiiPii: .LFB2086: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 104(%rsp), %rax subq %fs:40, %rax jne .L29 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z7kernel2Pii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z27__device_stub__Z7kernel2PiiPii, .-_Z27__device_stub__Z7kernel2PiiPii .globl _Z7kernel2Pii .type _Z7kernel2Pii, @function _Z7kernel2Pii: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z7kernel2PiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z7kernel2Pii, .-_Z7kernel2Pii .globl _Z32__device_stub__Z11kernel2_optPiiPii .type 
_Z32__device_stub__Z11kernel2_optPiiPii, @function _Z32__device_stub__Z11kernel2_optPiiPii: .LFB2088: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11kernel2_optPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z32__device_stub__Z11kernel2_optPiiPii, .-_Z32__device_stub__Z11kernel2_optPiiPii .globl _Z11kernel2_optPii .type _Z11kernel2_optPii, @function _Z11kernel2_optPii: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z11kernel2_optPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z11kernel2_optPii, .-_Z11kernel2_optPii .section .rodata.str1.1 .LC4: .string "numElement = %d\n" .LC6: .string "CPU elapsed time: %.3f ms\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "kernel2 elapsed time: %.3f ms\n" .section .rodata.str1.1 .LC8: .string "kernel2: %s\n" .section .rodata.str1.8 .align 8 .LC9: .string "kernel2_opt elapsed time: %.3f ms\n" .align 8 .LC10: .string "!!!ERROR, TEST FAILED. 
i = %d: %d, %d\n" .text .globl _Z5demo2i .type _Z5demo2i, @function _Z5demo2i: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movl %edi, %ebx movl %edi, 28(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl %edi, %edx leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movslq %ebx, %rax movq %rax, 8(%rsp) leaq 0(,%rax,4), %r15 movq %r15, 16(%rsp) movq %r15, %rdi call malloc@PLT movq %rax, %r13 movq %r15, %rdi call malloc@PLT movq %rax, %r14 testl %ebx, %ebx jle .L41 movq %r13, %rbp movq %rax, %rbx addq %r13, %r15 movq %rax, %r12 .L42: call rand@PLT movl %eax, 0(%rbp) movl %eax, (%r12) addq $4, %rbp addq $4, %r12 cmpq %r15, %rbp jne .L42 leaq 40(%rsp), %rdi movq 16(%rsp), %r15 movq %r15, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r15, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0x00000000, 36(%rsp) leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT leaq (%r14,%r15), %rax .L43: addl $1, (%rbx) addq $4, %rbx cmpq %rax, %rbx jne .L43 .L50: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $1024, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl 
$0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L56 .L44: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC7(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %ecx movq 16(%rsp), %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $1024, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L57 .L45: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC9(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movq 16(%rsp), %rdx movq 40(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT cmpl $0, 28(%rsp) jle .L46 movl $0, %edx .L49: movl 0(%r13,%rdx,4), %ecx movl (%r14,%rdx,4), %r8d cmpl %r8d, %ecx jne .L58 addq $1, %rdx cmpq %rdx, 8(%rsp) jne .L49 .L46: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r13, %rdi call free@PLT movq %r14, %rdi call free@PLT .L40: movq 88(%rsp), %rax subq %fs:40, %rax jne .L59 
addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state movl 28(%rsp), %esi movq 40(%rsp), %rdi call _Z27__device_stub__Z7kernel2PiiPii jmp .L44 .L57: movl 28(%rsp), %esi movq 40(%rsp), %rdi call _Z32__device_stub__Z11kernel2_optPiiPii jmp .L45 .L58: leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L40 .L41: leaq 40(%rsp), %rdi movq 16(%rsp), %rbx movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0x00000000, 36(%rsp) leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT jmp .L50 .L59: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z5demo2i, .-_Z5demo2i .section .rodata.str1.8 .align 8 .LC11: .string "\n\nstart ............................................\n" .section .rodata.str1.1 .LC12: .string "demo2 started!\n" .LC13: .string "\n" .LC14: .string "demo1 started!\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl $1048576, %edi call _Z5demo2i leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $6, %ebp movl $1048576, %ebx leaq .LC13(%rip), %r12 .L61: movl %ebx, %edi call _Z5demo2i movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl %ebx, %ebx subl $1, %ebp jne .L61 leaq .LC14(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call _Z5demo1v movl $0, %eax popq %rbx .cfi_def_cfa_offset 24 
popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC15: .string "_Z11kernel2_optPii" .LC16: .string "_Z7kernel2Pii" .LC17: .string "_Z7kernel1Pi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2091: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC15(%rip), %rdx movq %rdx, %rcx leaq _Z11kernel2_optPii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC16(%rip), %rdx movq %rdx, %rcx leaq _Z7kernel2Pii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z7kernel1Pi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData 
.quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h>
#include <stdlib.h>

/* Increment exactly one element per thread; the launch must cover the
   array exactly (no bounds check). */
__global__ void kernel1(int* d_data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    d_data[tid] += 1;
}

/* Each thread walks its own contiguous slice of the array
   (numElement / total-thread-count elements per thread). */
__global__ void kernel2(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    const int perThread = numElement / nthread;
    const int first = tid * perThread;
    const int last = first + perThread;
    for (int idx = first; idx < last; ++idx) {
        d_data[idx] += 1;
    }
}

/* Threads stride across the whole array until it is exhausted. */
__global__ void kernel2_opt(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    for (int idx = tid; idx < numElement; idx += nthread) {
        d_data[idx] += 1;
    }
}

/* Time kernel1 on 512K ints and check the result against a host reference. */
void demo1() {
    const int numElement = 512 * 1024;
    const size_t bytes = sizeof(int) * numElement;
    int* host = (int*)malloc(bytes);
    int* expected = (int*)malloc(bytes);
    for (int idx = 0; idx < numElement; ++idx) {
        host[idx] = rand();
        expected[idx] = host[idx] + 1;
    }

    int* dev;
    cudaMalloc(&dev, bytes);
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);

    cudaEvent_t evBegin, evEnd;
    cudaEventCreate(&evBegin);
    cudaEventCreate(&evEnd);

    cudaEventRecord(evBegin, 0);
    kernel1<<<1024, 512>>>(dev);
    cudaEventRecord(evEnd, 0);
    cudaEventSynchronize(evEnd);

    float ms;
    cudaEventElapsedTime(&ms, evBegin, evEnd);
    printf("Kernel elapsed time: %.3f ms\n", ms);
    printf("kernel1: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev);

    for (int idx = 0; idx < numElement; ++idx) {
        if (host[idx] != expected[idx]) {
            printf("!!!ERROR, TEST FAILED.\n");
            return;
        }
    }
    printf("Test pass...\n");
    free(host);
    free(expected);
}

/* Compare a CPU increment loop against kernel2 and kernel2_opt, then
   verify the device result against the host reference. */
void demo2(const int numElement) {
    printf("numElement = %d\n", numElement);
    const size_t bytes = sizeof(int) * numElement;
    int* host = (int*)malloc(bytes);
    int* ref = (int*)malloc(bytes);
    for (int idx = 0; idx < numElement; ++idx) {
        host[idx] = rand();
        ref[idx] = host[idx];
    }

    int* dev;
    cudaMalloc(&dev, bytes);
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);

    float ms = 0.0f;
    cudaEvent_t evBegin, evEnd;
    cudaEventCreate(&evBegin);
    cudaEventCreate(&evEnd);

    /* CPU baseline */
    ms = 0.0f;
    cudaEventRecord(evBegin, 0);
    for (int idx = 0; idx < numElement; ++idx) {
        ref[idx] += 1;
    }
    cudaEventRecord(evEnd, 0);
    cudaEventSynchronize(evEnd);
    cudaEventElapsedTime(&ms, evBegin, evEnd);
    printf("CPU elapsed time: %.3f ms\n", ms);

    /* chunked kernel */
    ms = 0.0f;
    cudaEventRecord(evBegin, 0);
    kernel2<<<1024, 512>>>(dev, numElement);
    cudaEventRecord(evEnd, 0);
    cudaEventSynchronize(evEnd);
    cudaEventElapsedTime(&ms, evBegin, evEnd);
    printf("kernel2 elapsed time: %.3f ms\n", ms);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));

    /* grid-stride kernel on a fresh copy of the input */
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);
    cudaEventRecord(evBegin, 0);
    kernel2_opt<<<1024, 512>>>(dev, numElement);
    cudaEventRecord(evEnd, 0);
    cudaEventSynchronize(evEnd);
    cudaEventElapsedTime(&ms, evBegin, evEnd);
    printf("kernel2_opt elapsed time: %.3f ms\n", ms);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev);

    for (int idx = 0; idx < numElement; ++idx) {
        if (host[idx] != ref[idx]) {
            printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", idx, host[idx], ref[idx]);
            return;
        }
    }
    printf("Test pass...\n");
    free(host);
    free(ref);
}

int main() {
    int numElement = 1 * 1024 * 1024;
    demo2(numElement); /* warm-up run so later timings are representative */
    printf("\n\nstart ............................................\n");
    printf("demo2 started!\n");
    int sz = numElement;
    while (sz <= 32 * 1024 * 1024) {
        demo2(sz);
        printf("\n");
        sz *= 2;
    }
    printf("demo1 started!\n");
    demo1();
    return EXIT_SUCCESS;
}
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// One element per thread, no tail guard: the launch configuration must
// satisfy gridDim.x * blockDim.x == number of elements (the demos launch
// 1024 blocks x 512 threads for 512K+ element arrays that divide exactly).
__global__ void kernel1(int* d_data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    d_data[tid] += 1;
}

// Chunked partitioning: each thread increments a contiguous slice of
// numElement / nthread elements, so adjacent threads touch addresses far
// apart (uncoalesced) — this is the deliberately slow demo kernel.
// NOTE: any remainder when numElement is not divisible by the thread count
// is left untouched; the demo only uses divisible sizes (>= 1M, powers of 2).
__global__ void kernel2(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    const int numElementPerThread = numElement / nthread;
    const int start = tid * numElementPerThread;
    int end = start + numElementPerThread;
    for (int i = start; i < end; i++) {
        d_data[i] += 1;
    }
}

// Grid-stride loop: adjacent threads touch adjacent elements (coalesced),
// correct for any grid size and any numElement.
__global__ void kernel2_opt(int* d_data, const int numElement) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x * gridDim.x;
    for (int i = tid; i < numElement; i += nthread) {
        d_data[i] += 1;
    }
}

// Times kernel1 over 512K elements and verifies against a CPU reference.
// Fixes vs. original: the failure path no longer leaks h_data/gold, and the
// HIP events are destroyed before returning.
void demo1() {
    const int numElement = 512 * 1024;
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i] + 1;
    }

    int* d_data;
    hipMalloc(&d_data, sizeof(int) * numElement);
    hipMemcpy(d_data, h_data, sizeof(int) * numElement, hipMemcpyHostToDevice);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    kernel1<<<1024, 512>>>(d_data);  // 1024 * 512 == numElement exactly
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Kernel elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel1: %s\n", hipGetErrorString(hipGetLastError()));

    hipMemcpy(h_data, d_data, sizeof(int) * numElement, hipMemcpyDeviceToHost);
    hipFree(d_data);
    hipEventDestroy(start);  // fixed: events were never destroyed
    hipEventDestroy(stop);

    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED.\n");
            free(h_data);  // fixed: was leaked on the error path
            free(gold);
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

// Benchmarks a CPU increment loop, kernel2 (chunked) and kernel2_opt
// (grid-stride) over numElement ints, then verifies against a CPU reference.
// The CPU loop is timed with HIP events too, for a rough comparison.
// Fixes vs. original: no leak of h_data/gold on the failure path, and the
// HIP events are destroyed before returning.
void demo2(const int numElement) {
    printf("numElement = %d\n", numElement);
    int* h_data = (int*)malloc(sizeof(int) * numElement);
    int* gold = (int*)malloc(sizeof(int) * numElement);
    for (int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i];
    }

    int* d_data;
    hipMalloc(&d_data, sizeof(int) * numElement);
    hipMemcpy(d_data, h_data, sizeof(int) * numElement, hipMemcpyHostToDevice);

    float elapsedTime = 0.0f;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    /* CPU reference (also produces the expected output in gold[]) */
    hipEventRecord(start, 0);
    for (int i = 0; i < numElement; i++) {
        gold[i] += 1;
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("CPU elapsed time: %.3f ms\n", elapsedTime);

    /* GPU method 1: chunked partitioning (uncoalesced) */
    hipEventRecord(start, 0);
    kernel2<<<1024, 512>>>(d_data, numElement);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2 elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", hipGetErrorString(hipGetLastError()));

    /* GPU method 2: grid-stride loop; reset the input first so both
       kernels start from the same data */
    hipMemcpy(d_data, h_data, sizeof(int) * numElement, hipMemcpyHostToDevice);
    hipEventRecord(start, 0);
    kernel2_opt<<<1024, 512>>>(d_data, numElement);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", hipGetErrorString(hipGetLastError()));

    hipMemcpy(h_data, d_data, sizeof(int) * numElement, hipMemcpyDeviceToHost);
    hipFree(d_data);
    hipEventDestroy(start);  // fixed: events were never destroyed
    hipEventDestroy(stop);

    for (int i = 0; i < numElement; i++) {
        if (h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]);
            free(h_data);  // fixed: was leaked on the error path
            free(gold);
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

int main() {
    int numElement = 1 * 1024 * 1024;
    demo2(numElement);  // execute once to warm up for performance measurement
    printf("\n\nstart ............................................\n");
    printf("demo2 started!\n");
    for (int i = numElement; i <= 32 * 1024 * 1024; i *= 2) {
        demo2(i);
        printf("\n");
    }
    printf("demo1 started!\n");
    demo1();
    return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> __global__ void kernel1(int* d_data) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; d_data[tid] += 1; } __global__ void kernel2(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; const int numElementPerThread = numElement/nthread; const int start = tid*numElementPerThread; int end = start + numElementPerThread; for(int i = start; i < end; i++) { d_data[i] += 1; } } // __global__ void kernel2_opt(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; for(int i = tid; i < numElement; i += nthread) { d_data[i] += 1; } } void demo1() { const int numElement = 512*1024; int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i] + 1; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); kernel1<<<1024, 512>>>(d_data); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Kernel elapsed time: %.3f ms\n", elapsedTime); printf("kernel1: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED.\n"); return; } } printf("Test pass...\n"); free(h_data); free(gold); } void demo2(const int numElement) { printf("numElement = %d\n", numElement); int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = 
h_data[i]; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); float elapsedTime = 0.0f; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /*CPU*/ elapsedTime = 0.0f; hipEventRecord(start, 0); for(int i = 0; i < numElement; i++) { gold[i] += 1; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("CPU elapsed time: %.3f ms\n", elapsedTime); /*GPU method 1*/ elapsedTime = 0.0f; hipEventRecord(start, 0); kernel2<<<1024, 512>>>(d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2 elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); /*GPU method 2*/ hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEventRecord(start, 0); kernel2_opt<<<1024, 512>>>(d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]); return; } } printf("Test pass...\n"); free(h_data); free(gold); } int main() { int numElement = 1*1024*1024; demo2(numElement); //execute once to warm up for performance measurement printf("\n\nstart ............................................\n"); printf("demo2 started!\n"); for(int i = numElement; i <= 32*1024*1024; i*=2) { demo2(i); printf("\n"); } printf("demo1 started!\n"); demo1(); return EXIT_SUCCESS; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7kernel1Pi .globl _Z7kernel1Pi .p2align 8 .type _Z7kernel1Pi,@function _Z7kernel1Pi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, 1, v2 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7kernel1Pi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 
.amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7kernel1Pi, .Lfunc_end0-_Z7kernel1Pi .section .AMDGPU.csdata,"",@progbits .text .protected _Z7kernel2Pii .globl _Z7kernel2Pii .p2align 8 .type _Z7kernel2Pii,@function _Z7kernel2Pii: s_clause 0x2 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s4, s[0:1], 0x10 s_load_b32 s5, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_ashr_i32 s8, s5, 31 s_mul_i32 s2, s4, s3 s_add_i32 s5, s5, s8 s_ashr_i32 s4, s2, 31 s_xor_b32 s5, s5, s8 s_add_i32 s2, s2, s4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) s_xor_b32 s2, s2, s4 s_xor_b32 s4, s8, s4 v_cvt_f32_u32_e32 v1, s2 s_sub_i32 s7, 0, s2 v_rcp_iflag_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x4f7ffffe, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_u32_f32_e32 v1, v1 v_readfirstlane_b32 s6, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s7, s7, s6 s_mul_hi_u32 s7, s6, s7 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s6, s6, s7 s_mul_hi_u32 s6, s5, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s7, s6, s2 s_sub_i32 s5, s5, s7 s_add_i32 s7, s6, 1 s_sub_i32 s8, s5, s2 s_cmp_ge_u32 s5, s2 s_cselect_b32 s6, s7, s6 s_cselect_b32 s5, s8, s5 s_add_i32 s7, s6, 1 s_cmp_ge_u32 s5, s2 s_cselect_b32 s2, s7, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_xor_b32 s2, s2, s4 s_sub_i32 s4, s2, s4 s_mov_b32 s2, 0 s_cmp_lt_i32 s4, 1 s_cbranch_scc1 .LBB1_3 v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1] s_load_b64 s[0:1], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v0, v1, s4 v_ashrrev_i32_e32 v1, 31, v0 v_add_nc_u32_e32 v3, s4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[0:1] s_waitcnt lgkmcnt(0) 
v_add_co_u32 v1, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo .LBB1_2: global_load_b32 v4, v[1:2], off v_add_nc_u32_e32 v0, 1, v0 s_delay_alu instid0(VALU_DEP_1) v_cmp_ge_i32_e32 vcc_lo, v0, v3 s_or_b32 s2, vcc_lo, s2 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v4, 1, v4 global_store_b32 v[1:2], v4, off v_add_co_u32 v1, s0, v1, 4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v2, s0, 0, v2, s0 s_and_not1_b32 exec_lo, exec_lo, s2 s_cbranch_execnz .LBB1_2 .LBB1_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7kernel2Pii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z7kernel2Pii, .Lfunc_end1-_Z7kernel2Pii .section .AMDGPU.csdata,"",@progbits .text .protected _Z11kernel2_optPii .globl 
_Z11kernel2_optPii .p2align 8 .type _Z11kernel2_optPii,@function _Z11kernel2_optPii: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b32 s6, s[0:1], 0x8 s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_cmpx_gt_i32_e64 s6, v1 s_cbranch_execz .LBB2_3 s_load_b32 s2, s[2:3], 0x0 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s4 v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_ashr_i32 s3, s2, 31 s_mov_b32 s1, 0 s_lshl_b64 s[4:5], s[2:3], 2 .LBB2_2: global_load_b32 v0, v[2:3], off v_add_nc_u32_e32 v1, s2, v1 s_delay_alu instid0(VALU_DEP_1) v_cmp_le_i32_e32 vcc_lo, s6, v1 s_or_b32 s1, vcc_lo, s1 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v0, 1, v0 global_store_b32 v[2:3], v0, off v_add_co_u32 v2, s0, v2, s4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, s5, v3, s0 s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB2_2 .LBB2_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11kernel2_optPii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 
16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z11kernel2_optPii, .Lfunc_end2-_Z11kernel2_optPii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 
.kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7kernel1Pi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7kernel1Pi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7kernel2Pii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7kernel2Pii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 
.size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11kernel2_optPii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11kernel2_optPii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> __global__ void kernel1(int* d_data) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; d_data[tid] += 1; } __global__ void kernel2(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; const int numElementPerThread = numElement/nthread; const int start = tid*numElementPerThread; int end = start + numElementPerThread; for(int i = start; i < end; i++) { d_data[i] += 1; } } // __global__ void kernel2_opt(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; for(int i = tid; i < numElement; i += nthread) { d_data[i] += 1; } } void demo1() { const int numElement = 512*1024; int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i] + 1; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); kernel1<<<1024, 512>>>(d_data); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Kernel elapsed time: %.3f ms\n", elapsedTime); printf("kernel1: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED.\n"); return; } } printf("Test pass...\n"); free(h_data); free(gold); } void demo2(const int numElement) { printf("numElement = %d\n", numElement); int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = 
h_data[i]; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); float elapsedTime = 0.0f; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /*CPU*/ elapsedTime = 0.0f; hipEventRecord(start, 0); for(int i = 0; i < numElement; i++) { gold[i] += 1; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("CPU elapsed time: %.3f ms\n", elapsedTime); /*GPU method 1*/ elapsedTime = 0.0f; hipEventRecord(start, 0); kernel2<<<1024, 512>>>(d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2 elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); /*GPU method 2*/ hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEventRecord(start, 0); kernel2_opt<<<1024, 512>>>(d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]); return; } } printf("Test pass...\n"); free(h_data); free(gold); } int main() { int numElement = 1*1024*1024; demo2(numElement); //execute once to warm up for performance measurement printf("\n\nstart ............................................\n"); printf("demo2 started!\n"); for(int i = numElement; i <= 32*1024*1024; i*=2) { demo2(i); printf("\n"); } printf("demo1 started!\n"); demo1(); return EXIT_SUCCESS; }
.text .file "demo2.hip" .globl _Z22__device_stub__kernel1Pi # -- Begin function _Z22__device_stub__kernel1Pi .p2align 4, 0x90 .type _Z22__device_stub__kernel1Pi,@function _Z22__device_stub__kernel1Pi: # @_Z22__device_stub__kernel1Pi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z7kernel1Pi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z22__device_stub__kernel1Pi, .Lfunc_end0-_Z22__device_stub__kernel1Pi .cfi_endproc # -- End function .globl _Z22__device_stub__kernel2Pii # -- Begin function _Z22__device_stub__kernel2Pii .p2align 4, 0x90 .type _Z22__device_stub__kernel2Pii,@function _Z22__device_stub__kernel2Pii: # @_Z22__device_stub__kernel2Pii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z7kernel2Pii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z22__device_stub__kernel2Pii, .Lfunc_end1-_Z22__device_stub__kernel2Pii .cfi_endproc # -- End function .globl _Z26__device_stub__kernel2_optPii # -- Begin function _Z26__device_stub__kernel2_optPii .p2align 4, 0x90 .type _Z26__device_stub__kernel2_optPii,@function _Z26__device_stub__kernel2_optPii: # @_Z26__device_stub__kernel2_optPii 
.cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z11kernel2_optPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z26__device_stub__kernel2_optPii, .Lfunc_end2-_Z26__device_stub__kernel2_optPii .cfi_endproc # -- End function .globl _Z5demo1v # -- Begin function _Z5demo1v .p2align 4, 0x90 .type _Z5demo1v,@function _Z5demo1v: # @_Z5demo1v .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $2097152, %edi # imm = 0x200000 callq malloc movq %rax, %rbx movl $2097152, %edi # imm = 0x200000 callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r15,4) incl %eax movl %eax, (%r14,%r15,4) incq %r15 cmpq $524288, %r15 # imm = 0x80000 jne .LBB3_1 # %bb.2: leaq 8(%rsp), %rdi movl $2097152, %esi # imm = 0x200000 callq hipMalloc movq 8(%rsp), %rdi movl $2097152, %edx # imm = 0x200000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy leaq 40(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 40(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967808, %rdx # imm = 0x100000200 leaq 512(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 8(%rsp), %rax movq %rax, 88(%rsp) leaq 88(%rsp), %rax movq %rax, 48(%rsp) leaq 
24(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 24(%rsp), %rsi movl 32(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7kernel1Pi, %edi pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: movq 16(%rsp), %rdi xorl %r15d, %r15d xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 40(%rsp), %rsi movq 16(%rsp), %rdx leaq 24(%rsp), %rdi callq hipEventElapsedTime movss 24(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 8(%rsp), %rsi movl $2097152, %edx # imm = 0x200000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree .p2align 4, 0x90 .LBB3_6: # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %eax cmpl (%r14,%r15,4), %eax jne .LBB3_7 # %bb.5: # in Loop: Header=BB3_6 Depth=1 incq %r15 cmpq $524288, %r15 # imm = 0x80000 jne .LBB3_6 # %bb.8: # %.critedge movl $.Lstr.2, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free jmp .LBB3_9 .LBB3_7: movl $.Lstr, %edi callq puts@PLT .LBB3_9: addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z5demo1v, .Lfunc_end3-_Z5demo1v .cfi_endproc # -- End function .globl _Z5demo2i # -- Begin function _Z5demo2i .p2align 4, 0x90 .type _Z5demo2i,@function _Z5demo2i: # @_Z5demo2i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $120, %rsp .cfi_def_cfa_offset 176 .cfi_offset %rbx, -56 
.cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movl $.L.str.4, %edi movl %ebp, %esi xorl %eax, %eax callq printf movslq %ebp, %r13 leaq (,%r13,4), %r15 movq %r15, %rdi callq malloc movq %rax, %rbx movq %r15, %rdi callq malloc movq %rax, %r14 movl %ebp, %r12d testl %r13d, %r13d jle .LBB4_3 # %bb.1: # %.lr.ph.preheader xorl %r13d, %r13d .p2align 4, 0x90 .LBB4_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r13,4) movl %eax, (%r14,%r13,4) incq %r13 cmpq %r13, %r12 jne .LBB4_2 .LBB4_3: # %._crit_edge leaq 32(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 32(%rsp), %rdi movq %rbx, %rsi movq %r15, 112(%rsp) # 8-byte Spill movq %r15, %rdx movl $1, %ecx callq hipMemcpy movl $0, 4(%rsp) leaq 24(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movl $0, 4(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl %ebp, %r15d testl %ebp, %ebp jle .LBB4_6 # %bb.4: # %.lr.ph75.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB4_5: # %.lr.ph75 # =>This Inner Loop Header: Depth=1 incl (%r14,%rax,4) incq %rax cmpq %rax, %r12 jne .LBB4_5 .LBB4_6: # %._crit_edge76 movabsq $4294967808, %rbp # imm = 0x100000200 movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf movl $0, 4(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 512(%rbp), %r13 movq %r13, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax movl %r15d, %ebp jne .LBB4_8 # %bb.7: movq 32(%rsp), %rax movq %rax, 88(%rsp) movl %ebp, 20(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rdi leaq 
56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7kernel2Pii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_8: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.6, %edi movb $1, %al callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.7, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 32(%rsp), %rdi movq %rbx, %rsi movq 112(%rsp), %r15 # 8-byte Reload movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r13, %rdi movl $1, %esi movabsq $4294967808, %rdx # imm = 0x100000200 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_10 # %bb.9: movq 32(%rsp), %rax movq %rax, 88(%rsp) movl %ebp, 20(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11kernel2_optPii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_10: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.8, %edi movb $1, %al 
callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.7, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 32(%rsp), %rsi movq %rbx, %rdi movq %r15, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rdi callq hipFree testl %ebp, %ebp jle .LBB4_15 # %bb.11: # %.lr.ph80.preheader xorl %esi, %esi .p2align 4, 0x90 .LBB4_12: # %.lr.ph80 # =>This Inner Loop Header: Depth=1 movl (%rbx,%rsi,4), %edx movl (%r14,%rsi,4), %ecx cmpl %ecx, %edx jne .LBB4_13 # %bb.14: # in Loop: Header=BB4_12 Depth=1 incq %rsi cmpq %rsi, %r12 jne .LBB4_12 .LBB4_15: # %.critedge movl $.Lstr.2, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free jmp .LBB4_16 .LBB4_13: movl $.L.str.9, %edi # kill: def $esi killed $esi killed $rsi xorl %eax, %eax callq printf .LBB4_16: addq $120, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z5demo2i, .Lfunc_end4-_Z5demo2i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl $1048576, %ebx # imm = 0x100000 movl $1048576, %edi # imm = 0x100000 callq _Z5demo2i movl $.Lstr.3, %edi callq puts@PLT movl $.Lstr.4, %edi callq puts@PLT .p2align 4, 0x90 .LBB5_1: # =>This Inner Loop Header: Depth=1 movl %ebx, %edi callq _Z5demo2i movl $10, %edi callq putchar@PLT leal (%rbx,%rbx), %eax cmpl $16777217, %ebx # imm = 0x1000001 movl %eax, %ebx jb .LBB5_1 # %bb.2: movl $.Lstr.5, %edi callq puts@PLT callq _Z5demo1v xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor 
.cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7kernel1Pi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7kernel2Pii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11kernel2_optPii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z7kernel1Pi,@object # @_Z7kernel1Pi .section .rodata,"a",@progbits .globl _Z7kernel1Pi .p2align 3, 0x0 _Z7kernel1Pi: .quad _Z22__device_stub__kernel1Pi .size _Z7kernel1Pi, 8 .type _Z7kernel2Pii,@object # @_Z7kernel2Pii .globl _Z7kernel2Pii .p2align 3, 0x0 _Z7kernel2Pii: .quad _Z22__device_stub__kernel2Pii .size 
_Z7kernel2Pii, 8 .type _Z11kernel2_optPii,@object # @_Z11kernel2_optPii .globl _Z11kernel2_optPii .p2align 3, 0x0 _Z11kernel2_optPii: .quad _Z26__device_stub__kernel2_optPii .size _Z11kernel2_optPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Kernel elapsed time: %.3f ms\n" .size .L.str, 30 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "kernel1: %s\n" .size .L.str.1, 13 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "numElement = %d\n" .size .L.str.4, 17 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "CPU elapsed time: %.3f ms\n" .size .L.str.5, 27 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "kernel2 elapsed time: %.3f ms\n" .size .L.str.6, 31 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "kernel2: %s\n" .size .L.str.7, 13 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "kernel2_opt elapsed time: %.3f ms\n" .size .L.str.8, 35 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "!!!ERROR, TEST FAILED. i = %d: %d, %d\n" .size .L.str.9, 39 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7kernel1Pi" .size .L__unnamed_1, 13 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z7kernel2Pii" .size .L__unnamed_2, 14 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z11kernel2_optPii" .size .L__unnamed_3, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "!!!ERROR, TEST FAILED." 
.size .Lstr, 23 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Test pass..." .size .Lstr.2, 13 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "\n\nstart ............................................" .size .Lstr.3, 53 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "demo2 started!" .size .Lstr.4, 15 .type .Lstr.5,@object # @str.5 .Lstr.5: .asciz "demo1 started!" .size .Lstr.5, 15 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__kernel1Pi .addrsig_sym _Z22__device_stub__kernel2Pii .addrsig_sym _Z26__device_stub__kernel2_optPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7kernel1Pi .addrsig_sym _Z7kernel2Pii .addrsig_sym _Z11kernel2_optPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0001852a_00000000-6_demo2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z7kernel1PiPi .type _Z26__device_stub__Z7kernel1PiPi, @function _Z26__device_stub__Z7kernel1PiPi: .LFB2084: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z7kernel1Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z26__device_stub__Z7kernel1PiPi, .-_Z26__device_stub__Z7kernel1PiPi .globl _Z7kernel1Pi .type _Z7kernel1Pi, @function _Z7kernel1Pi: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z7kernel1PiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z7kernel1Pi, .-_Z7kernel1Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Kernel elapsed time: %.3f ms\n" .LC1: .string "kernel1: %s\n" .LC2: .string "!!!ERROR, TEST FAILED.\n" .LC3: .string "Test 
pass...\n" .text .globl _Z5demo1v .type _Z5demo1v, @function _Z5demo1v: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $2097152, %edi call malloc@PLT movq %rax, %rbp movl $2097152, %edi call malloc@PLT movq %rax, %r12 movl $0, %ebx .L12: call rand@PLT movl %eax, 0(%rbp,%rbx) addl $1, %eax movl %eax, (%r12,%rbx) addq $4, %rbx cmpq $2097152, %rbx jne .L12 leaq 8(%rsp), %rdi movl $2097152, %esi call cudaMalloc@PLT movl $1, %ecx movl $2097152, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT leaq 16(%rsp), %rdi call cudaEventCreate@PLT leaq 24(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 16(%rsp), %rdi call cudaEventRecord@PLT movl $512, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1024, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L21 .L13: movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movq 24(%rsp), %rdi call cudaEventSynchronize@PLT leaq 44(%rsp), %rdi movq 24(%rsp), %rdx movq 16(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 44(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movl $2097152, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl $0, %eax .L16: movl (%r12,%rax), %ecx cmpl %ecx, 0(%rbp,%rax) jne .L22 addq $4, %rax cmpq $2097152, %rax jne .L16 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rbp, %rdi call free@PLT movq %r12, %rdi call 
free@PLT jmp .L11 .L21: movq 8(%rsp), %rdi call _Z26__device_stub__Z7kernel1PiPi jmp .L13 .L22: leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L11: movq 56(%rsp), %rax subq %fs:40, %rax jne .L23 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z5demo1v, .-_Z5demo1v .globl _Z27__device_stub__Z7kernel2PiiPii .type _Z27__device_stub__Z7kernel2PiiPii, @function _Z27__device_stub__Z7kernel2PiiPii: .LFB2086: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 104(%rsp), %rax subq %fs:40, %rax jne .L29 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z7kernel2Pii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z27__device_stub__Z7kernel2PiiPii, .-_Z27__device_stub__Z7kernel2PiiPii .globl _Z7kernel2Pii .type _Z7kernel2Pii, @function _Z7kernel2Pii: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z7kernel2PiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z7kernel2Pii, .-_Z7kernel2Pii .globl _Z32__device_stub__Z11kernel2_optPiiPii .type 
_Z32__device_stub__Z11kernel2_optPiiPii, @function _Z32__device_stub__Z11kernel2_optPiiPii: .LFB2088: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11kernel2_optPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z32__device_stub__Z11kernel2_optPiiPii, .-_Z32__device_stub__Z11kernel2_optPiiPii .globl _Z11kernel2_optPii .type _Z11kernel2_optPii, @function _Z11kernel2_optPii: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z11kernel2_optPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z11kernel2_optPii, .-_Z11kernel2_optPii .section .rodata.str1.1 .LC4: .string "numElement = %d\n" .LC6: .string "CPU elapsed time: %.3f ms\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "kernel2 elapsed time: %.3f ms\n" .section .rodata.str1.1 .LC8: .string "kernel2: %s\n" .section .rodata.str1.8 .align 8 .LC9: .string "kernel2_opt elapsed time: %.3f ms\n" .align 8 .LC10: .string "!!!ERROR, TEST FAILED. 
i = %d: %d, %d\n" .text .globl _Z5demo2i .type _Z5demo2i, @function _Z5demo2i: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movl %edi, %ebx movl %edi, 28(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl %edi, %edx leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movslq %ebx, %rax movq %rax, 8(%rsp) leaq 0(,%rax,4), %r15 movq %r15, 16(%rsp) movq %r15, %rdi call malloc@PLT movq %rax, %r13 movq %r15, %rdi call malloc@PLT movq %rax, %r14 testl %ebx, %ebx jle .L41 movq %r13, %rbp movq %rax, %rbx addq %r13, %r15 movq %rax, %r12 .L42: call rand@PLT movl %eax, 0(%rbp) movl %eax, (%r12) addq $4, %rbp addq $4, %r12 cmpq %r15, %rbp jne .L42 leaq 40(%rsp), %rdi movq 16(%rsp), %r15 movq %r15, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r15, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0x00000000, 36(%rsp) leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT leaq (%r14,%r15), %rax .L43: addl $1, (%rbx) addq $4, %rbx cmpq %rax, %rbx jne .L43 .L50: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $1024, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl 
$0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L56 .L44: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC7(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %ecx movq 16(%rsp), %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movl $512, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl $1024, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L57 .L45: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movq 56(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 56(%rsp), %rdx movq 48(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 36(%rsp), %xmm0 leaq .LC9(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movq 16(%rsp), %rdx movq 40(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq 40(%rsp), %rdi call cudaFree@PLT cmpl $0, 28(%rsp) jle .L46 movl $0, %edx .L49: movl 0(%r13,%rdx,4), %ecx movl (%r14,%rdx,4), %r8d cmpl %r8d, %ecx jne .L58 addq $1, %rdx cmpq %rdx, 8(%rsp) jne .L49 .L46: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r13, %rdi call free@PLT movq %r14, %rdi call free@PLT .L40: movq 88(%rsp), %rax subq %fs:40, %rax jne .L59 
addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state movl 28(%rsp), %esi movq 40(%rsp), %rdi call _Z27__device_stub__Z7kernel2PiiPii jmp .L44 .L57: movl 28(%rsp), %esi movq 40(%rsp), %rdi call _Z32__device_stub__Z11kernel2_optPiiPii jmp .L45 .L58: leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L40 .L41: leaq 40(%rsp), %rdi movq 16(%rsp), %rbx movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $0x00000000, 36(%rsp) leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 36(%rsp) movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT jmp .L50 .L59: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z5demo2i, .-_Z5demo2i .section .rodata.str1.8 .align 8 .LC11: .string "\n\nstart ............................................\n" .section .rodata.str1.1 .LC12: .string "demo2 started!\n" .LC13: .string "\n" .LC14: .string "demo1 started!\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movl $1048576, %edi call _Z5demo2i leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $6, %ebp movl $1048576, %ebx leaq .LC13(%rip), %r12 .L61: movl %ebx, %edi call _Z5demo2i movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl %ebx, %ebx subl $1, %ebp jne .L61 leaq .LC14(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call _Z5demo1v movl $0, %eax popq %rbx .cfi_def_cfa_offset 24 
popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC15: .string "_Z11kernel2_optPii" .LC16: .string "_Z7kernel2Pii" .LC17: .string "_Z7kernel1Pi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2091: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC15(%rip), %rdx movq %rdx, %rcx leaq _Z11kernel2_optPii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC16(%rip), %rdx movq %rdx, %rcx leaq _Z7kernel2Pii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z7kernel1Pi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData 
.quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "demo2.hip" .globl _Z22__device_stub__kernel1Pi # -- Begin function _Z22__device_stub__kernel1Pi .p2align 4, 0x90 .type _Z22__device_stub__kernel1Pi,@function _Z22__device_stub__kernel1Pi: # @_Z22__device_stub__kernel1Pi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z7kernel1Pi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z22__device_stub__kernel1Pi, .Lfunc_end0-_Z22__device_stub__kernel1Pi .cfi_endproc # -- End function .globl _Z22__device_stub__kernel2Pii # -- Begin function _Z22__device_stub__kernel2Pii .p2align 4, 0x90 .type _Z22__device_stub__kernel2Pii,@function _Z22__device_stub__kernel2Pii: # @_Z22__device_stub__kernel2Pii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z7kernel2Pii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z22__device_stub__kernel2Pii, .Lfunc_end1-_Z22__device_stub__kernel2Pii .cfi_endproc # -- End function .globl _Z26__device_stub__kernel2_optPii # -- Begin function _Z26__device_stub__kernel2_optPii .p2align 4, 0x90 .type _Z26__device_stub__kernel2_optPii,@function _Z26__device_stub__kernel2_optPii: # @_Z26__device_stub__kernel2_optPii 
.cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z11kernel2_optPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z26__device_stub__kernel2_optPii, .Lfunc_end2-_Z26__device_stub__kernel2_optPii .cfi_endproc # -- End function .globl _Z5demo1v # -- Begin function _Z5demo1v .p2align 4, 0x90 .type _Z5demo1v,@function _Z5demo1v: # @_Z5demo1v .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $2097152, %edi # imm = 0x200000 callq malloc movq %rax, %rbx movl $2097152, %edi # imm = 0x200000 callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r15,4) incl %eax movl %eax, (%r14,%r15,4) incq %r15 cmpq $524288, %r15 # imm = 0x80000 jne .LBB3_1 # %bb.2: leaq 8(%rsp), %rdi movl $2097152, %esi # imm = 0x200000 callq hipMalloc movq 8(%rsp), %rdi movl $2097152, %edx # imm = 0x200000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy leaq 40(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 40(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967808, %rdx # imm = 0x100000200 leaq 512(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 8(%rsp), %rax movq %rax, 88(%rsp) leaq 88(%rsp), %rax movq %rax, 48(%rsp) leaq 
24(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 24(%rsp), %rsi movl 32(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7kernel1Pi, %edi pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: movq 16(%rsp), %rdi xorl %r15d, %r15d xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 40(%rsp), %rsi movq 16(%rsp), %rdx leaq 24(%rsp), %rdi callq hipEventElapsedTime movss 24(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 8(%rsp), %rsi movl $2097152, %edx # imm = 0x200000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree .p2align 4, 0x90 .LBB3_6: # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %eax cmpl (%r14,%r15,4), %eax jne .LBB3_7 # %bb.5: # in Loop: Header=BB3_6 Depth=1 incq %r15 cmpq $524288, %r15 # imm = 0x80000 jne .LBB3_6 # %bb.8: # %.critedge movl $.Lstr.2, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free jmp .LBB3_9 .LBB3_7: movl $.Lstr, %edi callq puts@PLT .LBB3_9: addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z5demo1v, .Lfunc_end3-_Z5demo1v .cfi_endproc # -- End function .globl _Z5demo2i # -- Begin function _Z5demo2i .p2align 4, 0x90 .type _Z5demo2i,@function _Z5demo2i: # @_Z5demo2i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $120, %rsp .cfi_def_cfa_offset 176 .cfi_offset %rbx, -56 
.cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movl $.L.str.4, %edi movl %ebp, %esi xorl %eax, %eax callq printf movslq %ebp, %r13 leaq (,%r13,4), %r15 movq %r15, %rdi callq malloc movq %rax, %rbx movq %r15, %rdi callq malloc movq %rax, %r14 movl %ebp, %r12d testl %r13d, %r13d jle .LBB4_3 # %bb.1: # %.lr.ph.preheader xorl %r13d, %r13d .p2align 4, 0x90 .LBB4_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movl %eax, (%rbx,%r13,4) movl %eax, (%r14,%r13,4) incq %r13 cmpq %r13, %r12 jne .LBB4_2 .LBB4_3: # %._crit_edge leaq 32(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 32(%rsp), %rdi movq %rbx, %rsi movq %r15, 112(%rsp) # 8-byte Spill movq %r15, %rdx movl $1, %ecx callq hipMemcpy movl $0, 4(%rsp) leaq 24(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movl $0, 4(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl %ebp, %r15d testl %ebp, %ebp jle .LBB4_6 # %bb.4: # %.lr.ph75.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB4_5: # %.lr.ph75 # =>This Inner Loop Header: Depth=1 incl (%r14,%rax,4) incq %rax cmpq %rax, %r12 jne .LBB4_5 .LBB4_6: # %._crit_edge76 movabsq $4294967808, %rbp # imm = 0x100000200 movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf movl $0, 4(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord leaq 512(%rbp), %r13 movq %r13, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax movl %r15d, %ebp jne .LBB4_8 # %bb.7: movq 32(%rsp), %rax movq %rax, 88(%rsp) movl %ebp, 20(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rdi leaq 
56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7kernel2Pii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_8: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.6, %edi movb $1, %al callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.7, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 32(%rsp), %rdi movq %rbx, %rsi movq 112(%rsp), %r15 # 8-byte Reload movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r13, %rdi movl $1, %esi movabsq $4294967808, %rdx # imm = 0x100000200 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_10 # %bb.9: movq 32(%rsp), %rax movq %rax, 88(%rsp) movl %ebp, 20(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11kernel2_optPii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_10: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 8(%rsp), %rdx leaq 4(%rsp), %rdi callq hipEventElapsedTime movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.8, %edi movb $1, %al 
callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.7, %edi movq %rax, %rsi xorl %eax, %eax callq printf movq 32(%rsp), %rsi movq %rbx, %rdi movq %r15, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rdi callq hipFree testl %ebp, %ebp jle .LBB4_15 # %bb.11: # %.lr.ph80.preheader xorl %esi, %esi .p2align 4, 0x90 .LBB4_12: # %.lr.ph80 # =>This Inner Loop Header: Depth=1 movl (%rbx,%rsi,4), %edx movl (%r14,%rsi,4), %ecx cmpl %ecx, %edx jne .LBB4_13 # %bb.14: # in Loop: Header=BB4_12 Depth=1 incq %rsi cmpq %rsi, %r12 jne .LBB4_12 .LBB4_15: # %.critedge movl $.Lstr.2, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r14, %rdi callq free jmp .LBB4_16 .LBB4_13: movl $.L.str.9, %edi # kill: def $esi killed $esi killed $rsi xorl %eax, %eax callq printf .LBB4_16: addq $120, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z5demo2i, .Lfunc_end4-_Z5demo2i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl $1048576, %ebx # imm = 0x100000 movl $1048576, %edi # imm = 0x100000 callq _Z5demo2i movl $.Lstr.3, %edi callq puts@PLT movl $.Lstr.4, %edi callq puts@PLT .p2align 4, 0x90 .LBB5_1: # =>This Inner Loop Header: Depth=1 movl %ebx, %edi callq _Z5demo2i movl $10, %edi callq putchar@PLT leal (%rbx,%rbx), %eax cmpl $16777217, %ebx # imm = 0x1000001 movl %eax, %ebx jb .LBB5_1 # %bb.2: movl $.Lstr.5, %edi callq puts@PLT callq _Z5demo1v xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor 
.cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7kernel1Pi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7kernel2Pii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11kernel2_optPii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z7kernel1Pi,@object # @_Z7kernel1Pi .section .rodata,"a",@progbits .globl _Z7kernel1Pi .p2align 3, 0x0 _Z7kernel1Pi: .quad _Z22__device_stub__kernel1Pi .size _Z7kernel1Pi, 8 .type _Z7kernel2Pii,@object # @_Z7kernel2Pii .globl _Z7kernel2Pii .p2align 3, 0x0 _Z7kernel2Pii: .quad _Z22__device_stub__kernel2Pii .size 
_Z7kernel2Pii, 8 .type _Z11kernel2_optPii,@object # @_Z11kernel2_optPii .globl _Z11kernel2_optPii .p2align 3, 0x0 _Z11kernel2_optPii: .quad _Z26__device_stub__kernel2_optPii .size _Z11kernel2_optPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Kernel elapsed time: %.3f ms\n" .size .L.str, 30 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "kernel1: %s\n" .size .L.str.1, 13 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "numElement = %d\n" .size .L.str.4, 17 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "CPU elapsed time: %.3f ms\n" .size .L.str.5, 27 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "kernel2 elapsed time: %.3f ms\n" .size .L.str.6, 31 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "kernel2: %s\n" .size .L.str.7, 13 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "kernel2_opt elapsed time: %.3f ms\n" .size .L.str.8, 35 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "!!!ERROR, TEST FAILED. i = %d: %d, %d\n" .size .L.str.9, 39 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7kernel1Pi" .size .L__unnamed_1, 13 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z7kernel2Pii" .size .L__unnamed_2, 14 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z11kernel2_optPii" .size .L__unnamed_3, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "!!!ERROR, TEST FAILED." 
.size .Lstr, 23 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "Test pass..." .size .Lstr.2, 13 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "\n\nstart ............................................" .size .Lstr.3, 53 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "demo2 started!" .size .Lstr.4, 15 .type .Lstr.5,@object # @str.5 .Lstr.5: .asciz "demo1 started!" .size .Lstr.5, 15 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__kernel1Pi .addrsig_sym _Z22__device_stub__kernel2Pii .addrsig_sym _Z26__device_stub__kernel2_optPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7kernel1Pi .addrsig_sym _Z7kernel2Pii .addrsig_sym _Z11kernel2_optPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
code for sm_80 Function : _Z17compute_histogramPhPj .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ STS [R7.X4], RZ ; /* 0x000000ff07007388 */ /* 0x0011e20000004800 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */ /* 0x002fc600078e0207 */ /*0060*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe40000010000 */ /*0070*/ ISETP.GT.AND P0, PT, R0, 0x98967f, PT ; /* 0x0098967f0000780c */ /* 0x000fda0003f04270 */ /*0080*/ @P0 BRA 0x120 ; /* 0x0000009000000947 */ /* 0x001fea0003800000 */ /*0090*/ IADD3 R2, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */ /* 0x001fc80007f1e0ff */ /*00a0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P0 ; /* 0x0000590000037a11 */ /* 0x000fca00000f0eff */ /*00b0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1100 */ /*00c0*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */ /* 0x000fe20000000f00 */ /*00d0*/ YIELD ; /* 0x0000000000007946 */ /* 0x000fe80003800000 */ /*00e0*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */ /* 0x000fca00078e0200 */ /*00f0*/ ISETP.GE.AND P0, PT, R0, 0x989680, PT ; /* 0x009896800000780c */ /* 0x000fe20003f06270 */ /*0100*/ ATOMS.POPC.INC.32 RZ, [R2.X4+URZ] ; /* 0x0000000002ff7f8c */ /* 0x0041d8000d00403f */ /*0110*/ @!P0 BRA 0x90 ; /* 0xffffff7000008947 */ /* 0x000fea000383ffff */ /*0120*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0140*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */ /* 0x001fc800078e00ff */ /*0150*/ 
IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x168] ; /* 0x00005a0007027625 */ /* 0x000fe200078e0002 */ /*0160*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */ /* 0x000e280000004800 */ /*0170*/ RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */ /* 0x001fe2000c10e184 */ /*0180*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0190*/ BRA 0x190; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
.file "tmpxft_00090dd4_00000000-6_compute_histogram.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z17compute_histogramPhPjPhPj .type _Z39__device_stub__Z17compute_histogramPhPjPhPj, @function _Z39__device_stub__Z17compute_histogramPhPjPhPj: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z17compute_histogramPhPj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z17compute_histogramPhPjPhPj, .-_Z39__device_stub__Z17compute_histogramPhPjPhPj .globl _Z17compute_histogramPhPj .type _Z17compute_histogramPhPj, @function _Z17compute_histogramPhPj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z17compute_histogramPhPjPhPj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z17compute_histogramPhPj, .-_Z17compute_histogramPhPj .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17compute_histogramPhPj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17compute_histogramPhPj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
#include <hip/hip_runtime.h> #include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17compute_histogramPhPj .globl _Z17compute_histogramPhPj .p2align 8 .type _Z17compute_histogramPhPj,@function _Z17compute_histogramPhPj: s_load_b32 s4, s[0:1], 0x1c s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 v_mov_b32_e32 v3, 0 s_waitcnt lgkmcnt(0) s_and_b32 s5, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1] v_lshlrev_b32_e32 v2, 2, v0 ds_store_b32 v2, v3 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_i32_e32 0x989680, v1 s_cbranch_execz .LBB0_3 s_load_b32 s6, s[2:3], 0x0 s_load_b64 s[2:3], s[0:1], 0x0 v_mov_b32_e32 v3, 1 s_waitcnt lgkmcnt(0) s_mul_i32 s5, s6, s5 s_mov_b32 s6, 0 .LBB0_2: v_ashrrev_i32_e32 v5, 31, v1 v_add_co_u32 v4, vcc_lo, s2, v1 v_add_nc_u32_e32 v1, s5, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo v_cmp_lt_i32_e32 vcc_lo, 0x98967f, v1 global_load_u8 v4, v[4:5], off s_or_b32 s6, vcc_lo, s6 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v4, 2, v4 ds_add_u32 v4, v3 s_and_not1_b32 exec_lo, exec_lo, s6 s_cbranch_execnz .LBB0_2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s4 s_load_b64 s[0:1], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v1, v2 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_atomic_add_u32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17compute_histogramPhPj .amdhsa_group_segment_fixed_size 1024 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17compute_histogramPhPj, .Lfunc_end0-_Z17compute_histogramPhPj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 1024 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17compute_histogramPhPj .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17compute_histogramPhPj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" #define N 10000000 __global__ void compute_histogram(unsigned char *data, unsigned int *histogram) { __shared__ unsigned int cache[256]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0; __syncthreads(); while(i < N) { atomicAdd(&cache[data[i]], 1); i += blockDim.x * gridDim.x; } __syncthreads(); atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]); }
.text .file "compute_histogram.hip" .globl _Z32__device_stub__compute_histogramPhPj # -- Begin function _Z32__device_stub__compute_histogramPhPj .p2align 4, 0x90 .type _Z32__device_stub__compute_histogramPhPj,@function _Z32__device_stub__compute_histogramPhPj: # @_Z32__device_stub__compute_histogramPhPj .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z17compute_histogramPhPj, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z32__device_stub__compute_histogramPhPj, .Lfunc_end0-_Z32__device_stub__compute_histogramPhPj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17compute_histogramPhPj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17compute_histogramPhPj,@object # @_Z17compute_histogramPhPj .section .rodata,"a",@progbits .globl _Z17compute_histogramPhPj .p2align 3, 0x0 _Z17compute_histogramPhPj: .quad _Z32__device_stub__compute_histogramPhPj .size _Z17compute_histogramPhPj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17compute_histogramPhPj" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__compute_histogramPhPj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17compute_histogramPhPj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17compute_histogramPhPj .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ STS [R7.X4], RZ ; /* 0x000000ff07007388 */ /* 0x0011e20000004800 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */ /* 0x002fc600078e0207 */ /*0060*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe40000010000 */ /*0070*/ ISETP.GT.AND P0, PT, R0, 0x98967f, PT ; /* 0x0098967f0000780c */ /* 0x000fda0003f04270 */ /*0080*/ @P0 BRA 0x120 ; /* 0x0000009000000947 */ /* 0x001fea0003800000 */ /*0090*/ IADD3 R2, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */ /* 0x001fc80007f1e0ff */ /*00a0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P0 ; /* 0x0000590000037a11 */ /* 0x000fca00000f0eff */ /*00b0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1100 */ /*00c0*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */ /* 0x000fe20000000f00 */ /*00d0*/ YIELD ; /* 0x0000000000007946 */ /* 0x000fe80003800000 */ /*00e0*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */ /* 0x000fca00078e0200 */ /*00f0*/ ISETP.GE.AND P0, PT, R0, 0x989680, PT ; /* 0x009896800000780c */ /* 0x000fe20003f06270 */ /*0100*/ ATOMS.POPC.INC.32 RZ, [R2.X4+URZ] ; /* 0x0000000002ff7f8c */ /* 0x0041d8000d00403f */ /*0110*/ @!P0 BRA 0x90 ; /* 0xffffff7000008947 */ /* 0x000fea000383ffff */ /*0120*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0140*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */ /* 0x001fc800078e00ff */ /*0150*/ 
IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x168] ; /* 0x00005a0007027625 */ /* 0x000fe200078e0002 */ /*0160*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */ /* 0x000e280000004800 */ /*0170*/ RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */ /* 0x001fe2000c10e184 */ /*0180*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0190*/ BRA 0x190; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17compute_histogramPhPj .globl _Z17compute_histogramPhPj .p2align 8 .type _Z17compute_histogramPhPj,@function _Z17compute_histogramPhPj: s_load_b32 s4, s[0:1], 0x1c s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 v_mov_b32_e32 v3, 0 s_waitcnt lgkmcnt(0) s_and_b32 s5, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1] v_lshlrev_b32_e32 v2, 2, v0 ds_store_b32 v2, v3 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_i32_e32 0x989680, v1 s_cbranch_execz .LBB0_3 s_load_b32 s6, s[2:3], 0x0 s_load_b64 s[2:3], s[0:1], 0x0 v_mov_b32_e32 v3, 1 s_waitcnt lgkmcnt(0) s_mul_i32 s5, s6, s5 s_mov_b32 s6, 0 .LBB0_2: v_ashrrev_i32_e32 v5, 31, v1 v_add_co_u32 v4, vcc_lo, s2, v1 v_add_nc_u32_e32 v1, s5, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo v_cmp_lt_i32_e32 vcc_lo, 0x98967f, v1 global_load_u8 v4, v[4:5], off s_or_b32 s6, vcc_lo, s6 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v4, 2, v4 ds_add_u32 v4, v3 s_and_not1_b32 exec_lo, exec_lo, s6 s_cbranch_execnz .LBB0_2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s4 s_load_b64 s[0:1], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v1, v2 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_atomic_add_u32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17compute_histogramPhPj .amdhsa_group_segment_fixed_size 1024 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17compute_histogramPhPj, .Lfunc_end0-_Z17compute_histogramPhPj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 1024 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17compute_histogramPhPj .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17compute_histogramPhPj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00090dd4_00000000-6_compute_histogram.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z17compute_histogramPhPjPhPj .type _Z39__device_stub__Z17compute_histogramPhPjPhPj, @function _Z39__device_stub__Z17compute_histogramPhPjPhPj: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z17compute_histogramPhPj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z17compute_histogramPhPjPhPj, .-_Z39__device_stub__Z17compute_histogramPhPjPhPj .globl _Z17compute_histogramPhPj .type _Z17compute_histogramPhPj, @function _Z17compute_histogramPhPj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z17compute_histogramPhPjPhPj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z17compute_histogramPhPj, .-_Z17compute_histogramPhPj .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17compute_histogramPhPj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17compute_histogramPhPj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "compute_histogram.hip" .globl _Z32__device_stub__compute_histogramPhPj # -- Begin function _Z32__device_stub__compute_histogramPhPj .p2align 4, 0x90 .type _Z32__device_stub__compute_histogramPhPj,@function _Z32__device_stub__compute_histogramPhPj: # @_Z32__device_stub__compute_histogramPhPj .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z17compute_histogramPhPj, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z32__device_stub__compute_histogramPhPj, .Lfunc_end0-_Z32__device_stub__compute_histogramPhPj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17compute_histogramPhPj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17compute_histogramPhPj,@object # @_Z17compute_histogramPhPj .section .rodata,"a",@progbits .globl _Z17compute_histogramPhPj .p2align 3, 0x0 _Z17compute_histogramPhPj: .quad _Z32__device_stub__compute_histogramPhPj .size _Z17compute_histogramPhPj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17compute_histogramPhPj" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__compute_histogramPhPj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17compute_histogramPhPj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
__device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
code for sm_80 Function : _Z11simple_copyPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */ /* 0x001fca00078e0204 */ /*0060*/ IMAD.WIDE.U32 R2, R4, R5, c[0x4][0x0] ; /* 0x0100000004027625 */ /* 0x000fcc00078e0005 */ /*0070*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1b00 */ /*0080*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fca00078e0005 */ /*0090*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */ /* 0x004fe2000c101b04 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
__device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
.file "tmpxft_001613d7_00000000-6_simple_copy_glob.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z11simple_copyPdPd .type _Z31__device_stub__Z11simple_copyPdPd, @function _Z31__device_stub__Z11simple_copyPdPd: .LFB2051: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11simple_copyPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z11simple_copyPdPd, .-_Z31__device_stub__Z11simple_copyPdPd .globl _Z11simple_copyPd .type _Z11simple_copyPd, @function _Z11simple_copyPd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z11simple_copyPdPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z11simple_copyPd, .-_Z11simple_copyPd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z11simple_copyPd" .LC1: .string "a" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11simple_copyPd(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $8192, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL1a(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL1a .comm _ZL1a,8192,32 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
__device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
#include <hip/hip_runtime.h> __device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> __device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11simple_copyPd .globl _Z11simple_copyPd .p2align 8 .type _Z11simple_copyPd,@function _Z11simple_copyPd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_mov_b32_e32 v2, 0 s_getpc_b64 s[2:3] s_add_u32 s2, s2, a@rel32@lo+4 s_addc_u32 s3, s3, a@rel32@hi+12 v_lshlrev_b64 v[0:1], 3, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, v0, s2 v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off s_waitcnt vmcnt(0) global_store_b64 v[0:1], v[2:3], off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11simple_copyPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11simple_copyPd, .Lfunc_end0-_Z11simple_copyPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected a .type a,@object .section .bss,"aw",@nobits .globl a .p2align 4, 0x0 a: .zero 8192 .size a, 8192 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym a .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11simple_copyPd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11simple_copyPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 
.vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> __device__ double a[1024]; __global__ void simple_copy(double *b) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; b[i] = a[i]; }
.text .file "simple_copy_glob.hip" .globl _Z26__device_stub__simple_copyPd # -- Begin function _Z26__device_stub__simple_copyPd .p2align 4, 0x90 .type _Z26__device_stub__simple_copyPd,@function _Z26__device_stub__simple_copyPd: # @_Z26__device_stub__simple_copyPd .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z11simple_copyPd, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z26__device_stub__simple_copyPd, .Lfunc_end0-_Z26__device_stub__simple_copyPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11simple_copyPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $0, (%rsp) movl $a, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $8192, %r9d # imm = 0x2000 movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- 
Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type a,@object # @a .local a .comm a,8192,16 .type _Z11simple_copyPd,@object # @_Z11simple_copyPd .section .rodata,"a",@progbits .globl _Z11simple_copyPd .p2align 3, 0x0 _Z11simple_copyPd: .quad _Z26__device_stub__simple_copyPd .size _Z11simple_copyPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11simple_copyPd" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "a" .size .L__unnamed_2, 2 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__simple_copyPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym a .addrsig_sym _Z11simple_copyPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z11simple_copyPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */ /* 0x001fca00078e0204 */ /*0060*/ IMAD.WIDE.U32 R2, R4, R5, c[0x4][0x0] ; /* 0x0100000004027625 */ /* 0x000fcc00078e0005 */ /*0070*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1b00 */ /*0080*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fca00078e0005 */ /*0090*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */ /* 0x004fe2000c101b04 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11simple_copyPd .globl _Z11simple_copyPd .p2align 8 .type _Z11simple_copyPd,@function _Z11simple_copyPd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_mov_b32_e32 v2, 0 s_getpc_b64 s[2:3] s_add_u32 s2, s2, a@rel32@lo+4 s_addc_u32 s3, s3, a@rel32@hi+12 v_lshlrev_b64 v[0:1], 3, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, v0, s2 v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off s_waitcnt vmcnt(0) global_store_b64 v[0:1], v[2:3], off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11simple_copyPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11simple_copyPd, .Lfunc_end0-_Z11simple_copyPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected a .type a,@object .section .bss,"aw",@nobits .globl a .p2align 4, 0x0 a: .zero 8192 .size a, 8192 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym a .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11simple_copyPd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11simple_copyPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 
.vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001613d7_00000000-6_simple_copy_glob.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z11simple_copyPdPd .type _Z31__device_stub__Z11simple_copyPdPd, @function _Z31__device_stub__Z11simple_copyPdPd: .LFB2051: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11simple_copyPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z11simple_copyPdPd, .-_Z31__device_stub__Z11simple_copyPdPd .globl _Z11simple_copyPd .type _Z11simple_copyPd, @function _Z11simple_copyPd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z11simple_copyPdPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z11simple_copyPd, .-_Z11simple_copyPd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z11simple_copyPd" .LC1: .string "a" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11simple_copyPd(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $8192, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL1a(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL1a .comm _ZL1a,8192,32 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "simple_copy_glob.hip" .globl _Z26__device_stub__simple_copyPd # -- Begin function _Z26__device_stub__simple_copyPd .p2align 4, 0x90 .type _Z26__device_stub__simple_copyPd,@function _Z26__device_stub__simple_copyPd: # @_Z26__device_stub__simple_copyPd .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z11simple_copyPd, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z26__device_stub__simple_copyPd, .Lfunc_end0-_Z26__device_stub__simple_copyPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11simple_copyPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $0, (%rsp) movl $a, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $8192, %r9d # imm = 0x2000 movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- 
Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type a,@object # @a .local a .comm a,8192,16 .type _Z11simple_copyPd,@object # @_Z11simple_copyPd .section .rodata,"a",@progbits .globl _Z11simple_copyPd .p2align 3, 0x0 _Z11simple_copyPd: .quad _Z26__device_stub__simple_copyPd .size _Z11simple_copyPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11simple_copyPd" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "a" .size .L__unnamed_2, 2 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__simple_copyPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym a .addrsig_sym _Z11simple_copyPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. 
cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
void launchTestWithTiming( int testType, int length) { int actual; char* desc; timeval start, end; int* array = initializeArray(length); int expected = array[length - 1]; gettimeofday(&start, NULL); switch (testType) { // CPU case 0: desc = "Serial"; actual = findMaxOnHost(array, length); break; // Naive Parallel case 1: desc = "Naive Parallel"; actual = cudaFindMaxOnDeviceNaively(array, length); break; // Non Divergent Parallel case 2: desc = "Non Divergent Parallel"; actual = cudaFindMaxWithoutDivergence(array, length); break; // Shared Memory Parallel case 3: desc = "Shared Memory Parallel"; actual = cudaFindMaxWithSharedMemory(array, length); break; default: desc = "Unrecognized!"; break; } gettimeofday(&end, NULL); free(array); checkAndPrintResult(expected, actual, desc); printTiming(start, end, desc, length); } void runTest(int length) { launchTestWithTiming(0, length); launchTestWithTiming(1, length); launchTestWithTiming(1, length); launchTestWithTiming(2, length); launchTestWithTiming(3, length); } int main(void) { runTest(1024); runTest(4096); return 0; }
code for sm_80 Function : _Z29findMaxWithSharedMemoryKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */ /* 0x000fe20000000800 */ /*0030*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0040*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 0x000fe20000000800 */ /*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e220000002100 */ /*0060*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */ /* 0x000fe2000f8e023f */ /*0070*/ BSSY B0, 0x100 ; /* 0x0000008000007945 */ /* 0x000fe20003800000 */ /*0080*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0090*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x000fe400078e00ff */ /*00a0*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */ /* 0x001fc800078e0209 */ /*00b0*/ IMAD.WIDE.U32 R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x040fe200078e0003 */ /*00c0*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06070 */ /*00d0*/ @P0 BRA 0xf0 ; /* 0x0000001000000947 */ /* 0x000fea0003800000 */ /*00e0*/ LDG.E R4, [R2.64] ; /* 0x0000000602047981 */ /* 0x000164000c1e1900 */ /*00f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0100*/ USHF.R.U32.HI UR4, URZ, 0x1, UR5 ; /* 0x000000013f047899 */ /* 0x000fe20008011605 */ /*0110*/ STS [R9.X4], R4 ; /* 0x0000000409007388 */ /* 0x0203ea0000004800 */ /*0120*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*0130*/ @!P0 BRA 0x230 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0140*/ IMAD.SHL.U32 R4, R9, 0x4, RZ ; /* 0x0000000409047824 */ /* 0x002fe200078e00ff */ /*0150*/ MOV R5, UR4 ; /* 
0x0000000400057c02 */ /* 0x000fc60008000f00 */ /*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0170*/ ISETP.GE.U32.AND P0, PT, R9, R5, PT ; /* 0x000000050900720c */ /* 0x000fca0003f06070 */ /*0180*/ BSSY B0, 0x200 ; /* 0x0000007000007945 */ /* 0x000ff00003800000 */ /*0190*/ @P0 BRA 0x1f0 ; /* 0x0000005000000947 */ /* 0x002fea0003800000 */ /*01a0*/ IMAD R6, R5, 0x4, R4 ; /* 0x0000000405067824 */ /* 0x000fe200078e0204 */ /*01b0*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000fea0000004800 */ /*01c0*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000e640000000800 */ /*01d0*/ ISETP.GE.AND P0, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x002fda0003f06270 */ /*01e0*/ @!P0 STS [R9.X4], R6 ; /* 0x0000000609008388 */ /* 0x0003e40000004800 */ /*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0200*/ SHF.R.U32.HI R5, RZ, 0x1, R5 ; /* 0x00000001ff057819 */ /* 0x000fc80000011605 */ /*0210*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f05270 */ /*0220*/ @P0 BRA 0x160 ; /* 0xffffff3000000947 */ /* 0x000fea000383ffff */ /*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x002fe20000010000 */ /*0240*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe40003f05270 */ /*0250*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */ /* 0x000fd60003f26070 */ /*0260*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff058424 */ /* 0x000fc800078e00ff */ /*0270*/ @!P0 IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x160] ; /* 0x0000580008048625 */ /* 0x000fe200078e0005 */ /*0280*/ @!P0 LDS R7, [RZ] ; /* 0x00000000ff078984 */ /* 0x000e680000000800 */ /*0290*/ @!P0 STG.E [R4.64], R7 ; /* 0x0000000704008986 */ /* 0x0023e8000c101906 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*02c0*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 
0x002fe40000000800 */ /*02d0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*02e0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*02f0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0300*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */ /* 0x000364000c1e1900 */ /*0310*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x004fe200000001ff */ /*0320*/ IADD3 R4, R0, UR4, RZ ; /* 0x0000000400047c10 */ /* 0x000fd2000fffe0ff */ /*0330*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0340*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */ /* 0x000ea2000c1e1900 */ /*0350*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*0360*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe4000bf25270 */ /*0370*/ ISETP.GE.AND P0, PT, R6, R5, PT ; /* 0x000000050600720c */ /* 0x024fda0003f06270 */ /*0380*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */ /* 0x0005e2000c101906 */ /*0390*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff068224 */ /* 0x000fe200078e0005 */ /*03a0*/ @P1 BRA 0x310 ; /* 0xffffff6000001947 */ /* 0x000fea000383ffff */ /*03b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*03c0*/ BRA 0x3c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z30findMaxWithoutDivergenceKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0030*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe40000000800 */ /*0040*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fe2000f8e023f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e660000002100 */ /*0060*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*0070*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*0080*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x001fe200000001ff */ /*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fe200078e0203 */ /*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fd00000000a00 */ /*00c0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0005 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00e0*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fca000bf06070 */ /*00f0*/ BSSY B0, 0x190 ; /* 0x0000009000007945 */ /* 0x000ff00003800000 */ /*0100*/ @P0 BRA 0x180 ; /* 0x0000007000000947 */ /* 0x001fea0003800000 */ /*0110*/ IADD3 R4, R0, UR4, RZ ; /* 0x0000000400047c10 */ /* 0x000fe2000fffe0ff */ /*0120*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0130*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */ 
/* 0x000ea6000c1e1900 */ /*0140*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0150*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */ /* 0x000ea4000c1e1900 */ /*0160*/ ISETP.GE.AND P0, PT, R6, R5, PT ; /* 0x000000050600720c */ /* 0x004fda0003f06270 */ /*0170*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */ /* 0x0001e4000c101906 */ /*0180*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0190*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*01b0*/ @P0 BRA 0xd0 ; /* 0xffffff1000000947 */ /* 0x000fea000383ffff */ /*01c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z20findMaxNaivelyKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */ /* 0x000fe200078e00ff */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e260000002500 */ /*0030*/ IMAD R6, R6, c[0x0][0xc], RZ ; /* 0x0000030006067a24 */ /* 0x000fe200078e02ff */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e680000002100 */ /*0050*/ ISETP.GE.U32.AND P0, PT, R6, 0x2, PT ; /* 0x000000020600780c */ /* 0x000fda0003f06070 */ /*0060*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x003fe200078e0203 */ /*0090*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */ /* 0x000fe40000000000 */ /*00a0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fcc0000000a00 */ /*00b0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0005 */ /*00c0*/ UMOV UR5, UR4 ; /* 0x0000000400057c82 */ /* 0x000fe20008000000 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00e0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fca000800063f */ /*00f0*/ BSSY B0, 0x2c0 ; /* 0x000001c000007945 */ /* 0x000fe20003800000 */ /*0100*/ IMAD R9, RZ, RZ, -UR4 ; /* 0x80000004ff097e24 */ /* 0x000fe2000f8e02ff */ /*0110*/ ISETP.NE.U32.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe4000bf25070 */ /*0120*/ I2F.U32.RP R7, UR4 ; /* 0x0000000400077d06 */ /* 0x001e300008209000 */ /*0130*/ MUFU.RCP R7, R7 ; /* 0x0000000700077308 */ /* 0x001e240000001000 */ /*0140*/ IADD3 R4, R7, 0xffffffe, RZ ; /* 0x0ffffffe07047810 */ /* 0x001fcc0007ffe0ff */ /*0150*/ 
F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ /*0160*/ MOV R4, RZ ; /* 0x000000ff00047202 */ /* 0x001fe20000000f00 */ /*0170*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */ /* 0x002fc800078e02ff */ /*0180*/ IMAD.HI.U32 R5, R5, R9, R4 ; /* 0x0000000905057227 */ /* 0x000fcc00078e0004 */ /*0190*/ IMAD.HI.U32 R5, R5, R0, RZ ; /* 0x0000000005057227 */ /* 0x000fc800078e00ff */ /*01a0*/ IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff057224 */ /* 0x000fc800078e0a05 */ /*01b0*/ IMAD R5, R5, UR4, R0 ; /* 0x0000000405057c24 */ /* 0x000fca000f8e0200 */ /*01c0*/ ISETP.GE.U32.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */ /* 0x000fda000bf06070 */ /*01d0*/ @P0 IADD3 R5, R5, -UR4, RZ ; /* 0x8000000405050c10 */ /* 0x000fc8000fffe0ff */ /*01e0*/ ISETP.GE.U32.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */ /* 0x000fda000bf06070 */ /*01f0*/ @P0 IADD3 R5, R5, -UR4, RZ ; /* 0x8000000405050c10 */ /* 0x000fe4000fffe0ff */ /*0200*/ @!P1 LOP3.LUT R5, RZ, UR4, RZ, 0x33, !PT ; /* 0x00000004ff059c12 */ /* 0x000fe4000f8e33ff */ /*0210*/ ISETP.LE.U32.AND P0, PT, R6, UR4, PT ; /* 0x0000000406007c0c */ /* 0x000fe4000bf03070 */ /*0220*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*0230*/ @P1 BRA 0x2b0 ; /* 0x0000007000001947 */ /* 0x000fea0003800000 */ /*0240*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0250*/ IADD3 R4, R0, UR5, RZ ; /* 0x0000000500047c10 */ /* 0x000fe2000fffe0ff */ /*0260*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */ /* 0x000eb0000c1e1900 */ /*0270*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0280*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */ /* 0x000ea4000c1e1900 */ /*0290*/ ISETP.GE.AND P1, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f26270 */ /*02a0*/ @!P1 STG.E [R2.64], R4 ; /* 0x0000000402009986 */ /* 0x0001e4000c101906 */ /*02b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 
0x000fea0003800000 */ /*02c0*/ @!P0 BRA 0xc0 ; /* 0xfffffdf000008947 */ /* 0x000fea000383ffff */ /*02d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02e0*/ BRA 0x2e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. 
cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
void launchTestWithTiming( int testType, int length) { int actual; char* desc; timeval start, end; int* array = initializeArray(length); int expected = array[length - 1]; gettimeofday(&start, NULL); switch (testType) { // CPU case 0: desc = "Serial"; actual = findMaxOnHost(array, length); break; // Naive Parallel case 1: desc = "Naive Parallel"; actual = cudaFindMaxOnDeviceNaively(array, length); break; // Non Divergent Parallel case 2: desc = "Non Divergent Parallel"; actual = cudaFindMaxWithoutDivergence(array, length); break; // Shared Memory Parallel case 3: desc = "Shared Memory Parallel"; actual = cudaFindMaxWithSharedMemory(array, length); break; default: desc = "Unrecognized!"; break; } gettimeofday(&end, NULL); free(array); checkAndPrintResult(expected, actual, desc); printTiming(start, end, desc, length); } void runTest(int length) { launchTestWithTiming(0, length); launchTestWithTiming(1, length); launchTestWithTiming(1, length); launchTestWithTiming(2, length); launchTestWithTiming(3, length); } int main(void) { runTest(1024); runTest(4096); return 0; }
.file "tmpxft_000ea68b_00000000-6_reduction.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2069: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z13findMaxOnHostPii .type _Z13findMaxOnHostPii, @function _Z13findMaxOnHostPii: .LFB2057: .cfi_startproc endbr64 movl $1, %r8d cmpl $1, %esi jg .L4 .L5: movl (%rdi), %eax ret .L6: addl %r8d, %edx addq %r10, %rax cmpl %edx, %esi jle .L11 .L7: movl (%rax,%r9,4), %ecx cmpl %ecx, (%rax) jge .L6 movl %ecx, (%rax) jmp .L6 .L11: cmpl %esi, %r8d jge .L5 .L4: movl %r8d, %r9d addl %r8d, %r8d movslq %r8d, %r10 salq $2, %r10 movq %rdi, %rax movl $0, %edx movslq %r9d, %r9 jmp .L7 .cfi_endproc .LFE2057: .size _Z13findMaxOnHostPii, .-_Z13findMaxOnHostPii .globl _Z15initializeArrayi .type _Z15initializeArrayi, @function _Z15initializeArrayi: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx movslq %edi, %rdi salq $2, %rdi call malloc@PLT testl %ebx, %ebx jle .L12 movq %rax, %rcx leal 1(%rbx,%rbx), %esi movl $1, %edx .L14: movl %edx, (%rcx) addl $2, %edx addq $4, %rcx cmpl %esi, %edx jne .L14 .L12: popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z15initializeArrayi, .-_Z15initializeArrayi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%s length %d: %g usec\n" .text .globl _Z11printTiming7timevalS_Pci .type _Z11printTiming7timevalS_Pci, @function _Z11printTiming7timevalS_Pci: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq %rcx, %rax movq %r8, %rdx movl %r9d, %ecx subq %rsi, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2062: .size _Z11printTiming7timevalS_Pci, .-_Z11printTiming7timevalS_Pci .section .rodata.str1.1 .LC1: .string "%s passed check " .LC2: .string "%s failed check " .LC3: .string "(expected %d, got %d)\n" .text .globl _Z19checkAndPrintResultiiPc .type _Z19checkAndPrintResultiiPc, @function _Z19checkAndPrintResultiiPc: .LFB2063: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl %edi, %ebx movl %esi, %ebp cmpl %esi, %edi je .L23 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L21: movl %ebp, %ecx movl %ebx, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L21 .cfi_endproc .LFE2063: .size _Z19checkAndPrintResultiiPc, .-_Z19checkAndPrintResultiiPc .globl _Z40__device_stub__Z20findMaxNaivelyKernelPiPi .type _Z40__device_stub__Z20findMaxNaivelyKernelPiPi, @function _Z40__device_stub__Z20findMaxNaivelyKernelPiPi: .LFB2091: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 88(%rsp), %rax subq %fs:40, %rax jne .L29 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq 
_Z20findMaxNaivelyKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2091: .size _Z40__device_stub__Z20findMaxNaivelyKernelPiPi, .-_Z40__device_stub__Z20findMaxNaivelyKernelPiPi .globl _Z20findMaxNaivelyKernelPi .type _Z20findMaxNaivelyKernelPi, @function _Z20findMaxNaivelyKernelPi: .LFB2092: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z20findMaxNaivelyKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _Z20findMaxNaivelyKernelPi, .-_Z20findMaxNaivelyKernelPi .globl _Z26cudaFindMaxOnDeviceNaivelyPii .type _Z26cudaFindMaxOnDeviceNaivelyPii, @function _Z26cudaFindMaxOnDeviceNaivelyPii: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L33: leaq 4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L37 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movq 8(%rsp), %rdi call 
_Z40__device_stub__Z20findMaxNaivelyKernelPiPi jmp .L33 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z26cudaFindMaxOnDeviceNaivelyPii, .-_Z26cudaFindMaxOnDeviceNaivelyPii .globl _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi .type _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi, @function _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi: .LFB2093: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L42 .L38: movq 88(%rsp), %rax subq %fs:40, %rax jne .L43 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L42: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z30findMaxWithoutDivergenceKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L38 .L43: call __stack_chk_fail@PLT .cfi_endproc .LFE2093: .size _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi, .-_Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi .globl _Z30findMaxWithoutDivergenceKernelPi .type _Z30findMaxWithoutDivergenceKernelPi, @function _Z30findMaxWithoutDivergenceKernelPi: .LFB2094: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2094: .size _Z30findMaxWithoutDivergenceKernelPi, .-_Z30findMaxWithoutDivergenceKernelPi .globl _Z28cudaFindMaxWithoutDivergencePii .type _Z28cudaFindMaxWithoutDivergencePii, @function _Z28cudaFindMaxWithoutDivergencePii: .LFB2059: 
.cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L47: leaq 4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L51 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L50: .cfi_restore_state movq 8(%rsp), %rdi call _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi jmp .L47 .L51: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size _Z28cudaFindMaxWithoutDivergencePii, .-_Z28cudaFindMaxWithoutDivergencePii .globl _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi .type _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi, @function _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi: .LFB2095: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT 
testl %eax, %eax je .L56 .L52: movq 88(%rsp), %rax subq %fs:40, %rax jne .L57 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z29findMaxWithSharedMemoryKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L52 .L57: call __stack_chk_fail@PLT .cfi_endproc .LFE2095: .size _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi, .-_Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi .globl _Z29findMaxWithSharedMemoryKernelPi .type _Z29findMaxWithSharedMemoryKernelPi, @function _Z29findMaxWithSharedMemoryKernelPi: .LFB2096: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2096: .size _Z29findMaxWithSharedMemoryKernelPi, .-_Z29findMaxWithSharedMemoryKernelPi .globl _Z27cudaFindMaxWithSharedMemoryPii .type _Z27cudaFindMaxWithSharedMemoryPii, @function _Z27cudaFindMaxWithSharedMemoryPii: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L64 .L61: leaq 
4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L65 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L64: .cfi_restore_state movq 8(%rsp), %rdi call _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi jmp .L61 .L65: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size _Z27cudaFindMaxWithSharedMemoryPii, .-_Z27cudaFindMaxWithSharedMemoryPii .section .rodata.str1.1 .LC4: .string "Serial" .LC5: .string "Naive Parallel" .LC6: .string "Non Divergent Parallel" .LC7: .string "Shared Memory Parallel" .LC8: .string "Unrecognized!" .text .globl _Z20launchTestWithTimingii .type _Z20launchTestWithTimingii, @function _Z20launchTestWithTimingii: .LFB2064: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movl %edi, %ebp movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl %esi, %edi call _Z15initializeArrayi movq %rax, %r12 movslq %ebx, %rax movl -4(%r12,%rax,4), %r14d movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT cmpl $2, %ebp je .L67 jg .L68 testl %ebp, %ebp je .L69 cmpl $1, %ebp jne .L75 movl %ebx, %esi movq %r12, %rdi call _Z26cudaFindMaxOnDeviceNaivelyPii movl %eax, %r13d leaq .LC5(%rip), %rbp jmp .L71 .L75: leaq .LC8(%rip), %rbp jmp .L71 .L68: cmpl $3, %ebp jne .L76 movl %ebx, %esi movq %r12, %rdi call _Z27cudaFindMaxWithSharedMemoryPii movl %eax, %r13d leaq .LC7(%rip), %rbp jmp .L71 .L76: leaq .LC8(%rip), %rbp jmp .L71 .L69: movl %ebx, %esi movq %r12, %rdi call _Z13findMaxOnHostPii movl %eax, %r13d leaq .LC4(%rip), %rbp .L71: 
leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdx movl %r13d, %esi movl %r14d, %edi call _Z19checkAndPrintResultiiPc movl %ebx, %r9d movq %rbp, %r8 movq 16(%rsp), %rdx movq 24(%rsp), %rcx movq (%rsp), %rdi movq 8(%rsp), %rsi call _Z11printTiming7timevalS_Pci movq 40(%rsp), %rax subq %fs:40, %rax jne .L77 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L67: .cfi_restore_state movl %ebx, %esi movq %r12, %rdi call _Z28cudaFindMaxWithoutDivergencePii movl %eax, %r13d leaq .LC6(%rip), %rbp jmp .L71 .L77: call __stack_chk_fail@PLT .cfi_endproc .LFE2064: .size _Z20launchTestWithTimingii, .-_Z20launchTestWithTimingii .globl _Z7runTesti .type _Z7runTesti, @function _Z7runTesti: .LFB2065: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx movl %edi, %esi movl $0, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $1, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $1, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $2, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $3, %edi call _Z20launchTestWithTimingii popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2065: .size _Z7runTesti, .-_Z7runTesti .globl main .type main, @function main: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl $1024, %edi call _Z7runTesti movl $4096, %edi call _Z7runTesti movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC9: .string "_Z29findMaxWithSharedMemoryKernelPi" .align 8 .LC10: .string "_Z30findMaxWithoutDivergenceKernelPi" .section .rodata.str1.1 .LC11: .string "_Z20findMaxNaivelyKernelPi" .text .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2098: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z29findMaxWithSharedMemoryKernelPi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z30findMaxWithoutDivergenceKernelPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z20findMaxNaivelyKernelPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2098: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f 
.long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. 
cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* cudaArray; cudaMalloc(&cudaArray, size); cudaMemcpy(cudaArray, A, size, cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(cudaArray); // Recover just the first element from the device to save time. cudaMemcpy(&ret, cudaArray, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cudaArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
void launchTestWithTiming( int testType, int length) { int actual; char* desc; timeval start, end; int* array = initializeArray(length); int expected = array[length - 1]; gettimeofday(&start, NULL); switch (testType) { // CPU case 0: desc = "Serial"; actual = findMaxOnHost(array, length); break; // Naive Parallel case 1: desc = "Naive Parallel"; actual = cudaFindMaxOnDeviceNaively(array, length); break; // Non Divergent Parallel case 2: desc = "Non Divergent Parallel"; actual = cudaFindMaxWithoutDivergence(array, length); break; // Shared Memory Parallel case 3: desc = "Shared Memory Parallel"; actual = cudaFindMaxWithSharedMemory(array, length); break; default: desc = "Unrecognized!"; break; } gettimeofday(&end, NULL); free(array); checkAndPrintResult(expected, actual, desc); printTiming(start, end, desc, length); } void runTest(int length) { launchTestWithTiming(0, length); launchTestWithTiming(1, length); launchTestWithTiming(1, length); launchTestWithTiming(2, length); launchTestWithTiming(3, length); } int main(void) { runTest(1024); runTest(4096); return 0; }
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. 
hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
void launchTestWithTiming( int testType, int length) { int actual; char* desc; timeval start, end; int* array = initializeArray(length); int expected = array[length - 1]; gettimeofday(&start, NULL); switch (testType) { // CPU case 0: desc = "Serial"; actual = findMaxOnHost(array, length); break; // Naive Parallel case 1: desc = "Naive Parallel"; actual = cudaFindMaxOnDeviceNaively(array, length); break; // Non Divergent Parallel case 2: desc = "Non Divergent Parallel"; actual = cudaFindMaxWithoutDivergence(array, length); break; // Shared Memory Parallel case 3: desc = "Shared Memory Parallel"; actual = cudaFindMaxWithSharedMemory(array, length); break; default: desc = "Unrecognized!"; break; } gettimeofday(&end, NULL); free(array); checkAndPrintResult(expected, actual, desc); printTiming(start, end, desc, length); } void runTest(int length) { launchTestWithTiming(0, length); launchTestWithTiming(1, length); launchTestWithTiming(1, length); launchTestWithTiming(2, length); launchTestWithTiming(3, length); } int main(void) { runTest(1024); runTest(4096); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. 
hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
void launchTestWithTiming( int testType, int length) { int actual; char* desc; timeval start, end; int* array = initializeArray(length); int expected = array[length - 1]; gettimeofday(&start, NULL); switch (testType) { // CPU case 0: desc = "Serial"; actual = findMaxOnHost(array, length); break; // Naive Parallel case 1: desc = "Naive Parallel"; actual = cudaFindMaxOnDeviceNaively(array, length); break; // Non Divergent Parallel case 2: desc = "Non Divergent Parallel"; actual = cudaFindMaxWithoutDivergence(array, length); break; // Shared Memory Parallel case 3: desc = "Shared Memory Parallel"; actual = cudaFindMaxWithSharedMemory(array, length); break; default: desc = "Unrecognized!"; break; } gettimeofday(&end, NULL); free(array); checkAndPrintResult(expected, actual, desc); printTiming(start, end, desc, length); } void runTest(int length) { launchTestWithTiming(0, length); launchTestWithTiming(1, length); launchTestWithTiming(1, length); launchTestWithTiming(2, length); launchTestWithTiming(3, length); } int main(void) { runTest(1024); runTest(4096); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z20findMaxNaivelyKernelPi .globl _Z20findMaxNaivelyKernelPi .p2align 8 .type _Z20findMaxNaivelyKernelPi,@function _Z20findMaxNaivelyKernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s2, s4, s3 s_cmp_lt_u32 s2, 2 s_cbranch_scc1 .LBB0_6 s_load_b64 s[0:1], s[0:1], 0x0 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] v_mov_b32_e32 v3, v1 s_mov_b32 s3, 1 v_lshlrev_b64 v[3:4], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_cmp_ge_u32 s3, s2 s_cbranch_scc1 .LBB0_6 .LBB0_3: s_mov_b32 s5, s3 s_lshl_b32 s3, s3, 1 s_delay_alu instid0(SALU_CYCLE_1) s_add_i32 s4, s3, -1 s_waitcnt_vscnt null, 0x0 s_barrier v_and_b32_e32 v0, s4, v2 s_mov_b32 s4, exec_lo buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_2 v_add_nc_u32_e32 v0, s5, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[0:1] v_add_co_u32 v5, vcc_lo, s0, v5 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v5, v[3:4], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v5, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_2 global_store_b32 v[3:4], v0, off s_branch .LBB0_2 .LBB0_6: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z20findMaxNaivelyKernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 
.amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z20findMaxNaivelyKernelPi, .Lfunc_end0-_Z20findMaxNaivelyKernelPi .section .AMDGPU.csdata,"",@progbits .text .protected _Z30findMaxWithoutDivergenceKernelPi .globl _Z30findMaxWithoutDivergenceKernelPi .p2align 8 .type _Z30findMaxWithoutDivergenceKernelPi,@function _Z30findMaxWithoutDivergenceKernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s2, s4, s3 s_cmp_lt_u32 s2, 2 s_cbranch_scc1 .LBB1_6 s_load_b64 s[0:1], s[0:1], 0x0 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] v_mov_b32_e32 v3, v1 v_lshlrev_b64 v[3:4], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB1_3 .p2align 6 .LBB1_2: s_or_b32 exec_lo, exec_lo, s4 s_cmp_lt_u32 s3, 4 s_cbranch_scc1 .LBB1_6 .LBB1_3: s_mov_b32 s3, s2 s_lshr_b32 s2, s2, 1 s_mov_b32 s4, exec_lo s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv v_cmpx_gt_u32_e64 s2, v2 s_cbranch_execz .LBB1_2 v_add_nc_u32_e32 v0, s2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[0:1] v_add_co_u32 v5, vcc_lo, s0, v5 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v5, v[3:4], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v5, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB1_2 global_store_b32 v[3:4], v0, off s_branch .LBB1_2 .LBB1_6: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z30findMaxWithoutDivergenceKernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 
.amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z30findMaxWithoutDivergenceKernelPi, .Lfunc_end1-_Z30findMaxWithoutDivergenceKernelPi .section .AMDGPU.csdata,"",@progbits .text .protected _Z29findMaxWithSharedMemoryKernelPi .globl _Z29findMaxWithSharedMemoryKernelPi .p2align 8 .type _Z29findMaxWithSharedMemoryKernelPi,@function _Z29findMaxWithSharedMemoryKernelPi: s_clause 0x2 s_load_b32 s3, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_load_b64 s[0:1], s[0:1], 0x0 s_mov_b32 s2, s15 v_mov_b32_e32 v3, 0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1] s_mul_i32 s5, s4, s3 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s5, v1 s_and_saveexec_b32 s5, vcc_lo s_cbranch_execz .LBB2_2 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_load_b32 v3, v[2:3], off .LBB2_2: s_or_b32 exec_lo, exec_lo, s5 v_lshlrev_b32_e32 v2, 2, v0 s_cmp_lt_u32 s3, 2 s_waitcnt vmcnt(0) ds_store_b32 v2, v3 s_cbranch_scc0 .LBB2_8 .LBB2_3: s_mov_b32 s3, 0 s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB2_5 v_mov_b32_e32 v0, 0 s_lshl_b64 s[2:3], s[2:3], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s2, s0, s2 s_addc_u32 s3, s1, s3 ds_load_b32 v2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v2, s[2:3] .LBB2_5: s_or_b32 exec_lo, exec_lo, s5 v_max_u32_e32 v0, 1, v1 s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_mov_b32 s2, exec_lo 
v_cmpx_gt_u32_e64 s4, v0 s_cbranch_execz .LBB2_14 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v2, v3 v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo global_load_b32 v0, v[4:5], off s_branch .LBB2_12 .p2align 6 .LBB2_7: s_or_b32 exec_lo, exec_lo, s6 s_cmp_lt_u32 s5, 4 s_cbranch_scc1 .LBB2_3 .LBB2_8: s_mov_b32 s5, s3 s_lshr_b32 s3, s3, 1 s_mov_b32 s6, exec_lo s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_u32_e64 s3, v0 s_cbranch_execz .LBB2_7 v_add_lshl_u32 v3, s3, v0, 2 ds_load_b32 v3, v3 ds_load_b32 v4, v2 s_waitcnt lgkmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v4, v3 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB2_7 ds_store_b32 v2, v3 s_branch .LBB2_7 .p2align 6 .LBB2_11: s_or_b32 exec_lo, exec_lo, s3 s_cmp_gt_u32 s4, 3 s_mov_b32 s4, s2 s_cbranch_scc0 .LBB2_14 .LBB2_12: s_lshr_b32 s2, s4, 1 s_mov_b32 s3, exec_lo v_add_nc_u32_e32 v2, s2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[2:3] v_add_co_u32 v6, vcc_lo, s0, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo global_load_b32 v2, v[6:7], off s_waitcnt vmcnt(0) v_cmpx_lt_i32_e64 v0, v2 s_cbranch_execz .LBB2_11 v_mov_b32_e32 v0, v2 global_store_b32 v[4:5], v2, off s_branch .LBB2_11 .LBB2_14: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z29findMaxWithSharedMemoryKernelPi .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z29findMaxWithSharedMemoryKernelPi, .Lfunc_end2-_Z29findMaxWithSharedMemoryKernelPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z20findMaxNaivelyKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z20findMaxNaivelyKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z30findMaxWithoutDivergenceKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z30findMaxWithoutDivergenceKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: 
global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z29findMaxWithSharedMemoryKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z29findMaxWithSharedMemoryKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* reduction.cu A demonstration of array reduction using CUDA Created for GPU Architecture and Programming Spring 2012, New York University Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu> */ // Vanilla, sequential reduction on host // This *would* have a divergence problem if it were multithreaded int findMaxOnHost(int* A, int length) { for (int step = 1; step < length; step *= 2) { for (int i = 0; i < length; i += (2 * step)) { int candidate = A[i + step]; if (A[i] < candidate) { A[i] = candidate; } } } return A[0]; } __global__ void findMaxNaivelyKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = 1; step < (blockDim.x * gridDim.x); step *= 2) { __syncthreads(); if (tid % (2 * step) == 0) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } // Naively find the maximum element, without taking into account thread // diversion or memory efficiency int cudaFindMaxOnDeviceNaively(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length/512, 1); findMaxNaivelyKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. 
hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithoutDivergenceKernel(int* A) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int step = blockDim.x * gridDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (tid < step) { int candidate = A[tid + step]; if (A[tid] < candidate) { A[tid] = candidate; } } } } int cudaFindMaxWithoutDivergence(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithoutDivergenceKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } __global__ void findMaxWithSharedMemoryKernel(int* A) { //Static allocation like this is pretty lame but good enough for testing. __shared__ int sharedmem[512]; // copy this chunk into shared memory from global unsigned int threadIndex = threadIdx.x; unsigned int globalThreadId = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int nThreads = (blockDim.x * gridDim.x); sharedmem[threadIndex] = (globalThreadId < nThreads) ? A[globalThreadId] : 0; // Do the reduction in shared memory for (unsigned int step = blockDim.x >> 1; step > 0; step >>= 1) { __syncthreads(); if (threadIndex < step) { int candidate = sharedmem[threadIndex + step]; if (sharedmem[threadIndex] < candidate) { sharedmem[threadIndex] = candidate; } } } __syncthreads(); // Thread 0 within a block writes the result back to global memory if (threadIndex == 0) { A[blockIdx.x] = sharedmem[0]; } __syncthreads(); // Now the global data structure has as elements 0-gridDim.x filled with // the results which we need to reduce again. 
if (globalThreadId < gridDim.x) { for (unsigned int step = gridDim.x >> 1; step > 0; step >>= 1) { int candidate = A[globalThreadId + step]; if (A[globalThreadId] < candidate) { A[globalThreadId] = candidate; } } } } int cudaFindMaxWithSharedMemory(int* A, int length) { int size = length * sizeof(int); int ret; int* hipArray; hipMalloc(&hipArray, size); hipMemcpy(hipArray, A, size, hipMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(length / 512, 1); findMaxWithSharedMemoryKernel<<<dimGrid, dimBlock>>>(hipArray); // Recover just the first element from the device to save time. hipMemcpy(&ret, hipArray, sizeof(int), hipMemcpyDeviceToHost); hipFree(hipArray); return ret; } // Returns a pointer to an array of ints of size length. // For verification purposes, the biggest int will be at the end. int* initializeArray(int length) { // Set up an array of ints of the right length void *ptr; ptr = malloc(length * sizeof(int)); if (ptr == NULL) { // Handle allocation error } int* A = (int*) ptr; // Fill it with ints int j = 1; for (int i = 0; i < length; i++) { A[i] = j; j += 2; } return A; } void printTiming(timeval start, timeval end, char* desc, int length) { double elapsed = (end.tv_usec - start.tv_usec); printf("%s length %d: %g usec\n", desc, elapsed, length); } void checkAndPrintResult(int expected, int actual, char* desc) { if (expected == actual) { printf("%s passed check ", desc); } else { printf("%s failed check " , desc); } printf("(expected %d, got %d)\n", expected, actual); } // Wrapper function for launching each test component. 
// Runs one findMax test variant over a freshly initialized array of `length`
// ints, checks the result against the known maximum (last element), and
// prints pass/fail plus timing.
//
// testType selects the implementation:
//   0 = serial CPU, 1 = naive parallel, 2 = non-divergent parallel,
//   3 = shared-memory parallel; anything else reports "Unrecognized!".
void launchTestWithTiming( int testType, int length) {
    // Bug fix: `actual` was previously uninitialized, so the default case
    // compared (and printed) garbage.
    int actual = 0;
    char* desc;
    timeval start, end;

    int* array = initializeArray(length);
    // initializeArray places the largest value at the end.
    int expected = array[length - 1];

    gettimeofday(&start, NULL);
    switch (testType) {
        // CPU
        case 0:
            desc = (char*) "Serial";
            actual = findMaxOnHost(array, length);
            break;
        // Naive Parallel
        case 1:
            desc = (char*) "Naive Parallel";
            actual = cudaFindMaxOnDeviceNaively(array, length);
            break;
        // Non Divergent Parallel
        case 2:
            desc = (char*) "Non Divergent Parallel";
            actual = cudaFindMaxWithoutDivergence(array, length);
            break;
        // Shared Memory Parallel
        case 3:
            desc = (char*) "Shared Memory Parallel";
            actual = cudaFindMaxWithSharedMemory(array, length);
            break;
        default:
            desc = (char*) "Unrecognized!";
            break;
    }
    gettimeofday(&end, NULL);

    free(array);
    checkAndPrintResult(expected, actual, desc);
    printTiming(start, end, desc, length);
}

// Runs every variant once at the given length. Variant 1 runs twice;
// presumably the first call doubles as a GPU warm-up — confirm intent.
void runTest(int length) {
    launchTestWithTiming(0, length);
    launchTestWithTiming(1, length);
    launchTestWithTiming(1, length);
    launchTestWithTiming(2, length);
    launchTestWithTiming(3, length);
}

// Entry point: exercise all variants at two sizes (both multiples of 512,
// as the device wrappers' grid math assumes).
int main(void) {
    runTest(1024);
    runTest(4096);
    return 0;
}
.text .file "reduction.hip" .globl _Z13findMaxOnHostPii # -- Begin function _Z13findMaxOnHostPii .p2align 4, 0x90 .type _Z13findMaxOnHostPii,@function _Z13findMaxOnHostPii: # @_Z13findMaxOnHostPii .cfi_startproc # %bb.0: cmpl $2, %esi jl .LBB0_7 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl $1, %ecx jmp .LBB0_2 .p2align 4, 0x90 .LBB0_6: # %._crit_edge # in Loop: Header=BB0_2 Depth=1 cmpl %esi, %ecx jge .LBB0_7 .LBB0_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 # kill: def $ecx killed $ecx def $rcx movslq %ecx, %r8 addl %ecx, %ecx movslq %ecx, %rdx leaq (%rdi,%r8,4), %r8 xorl %r9d, %r9d jmp .LBB0_3 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_3 Depth=2 addq %rdx, %r9 cmpq %rax, %r9 jge .LBB0_6 .LBB0_3: # Parent Loop BB0_2 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r8,%r9,4), %r10d cmpl %r10d, (%rdi,%r9,4) jge .LBB0_5 # %bb.4: # in Loop: Header=BB0_3 Depth=2 movl %r10d, (%rdi,%r9,4) jmp .LBB0_5 .LBB0_7: # %._crit_edge21 movl (%rdi), %eax retq .Lfunc_end0: .size _Z13findMaxOnHostPii, .Lfunc_end0-_Z13findMaxOnHostPii .cfi_endproc # -- End function .globl _Z35__device_stub__findMaxNaivelyKernelPi # -- Begin function _Z35__device_stub__findMaxNaivelyKernelPi .p2align 4, 0x90 .type _Z35__device_stub__findMaxNaivelyKernelPi,@function _Z35__device_stub__findMaxNaivelyKernelPi: # @_Z35__device_stub__findMaxNaivelyKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z20findMaxNaivelyKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end1: .size _Z35__device_stub__findMaxNaivelyKernelPi, 
.Lfunc_end1-_Z35__device_stub__findMaxNaivelyKernelPi .cfi_endproc # -- End function .globl _Z26cudaFindMaxOnDeviceNaivelyPii # -- Begin function _Z26cudaFindMaxOnDeviceNaivelyPii .p2align 4, 0x90 .type _Z26cudaFindMaxOnDeviceNaivelyPii,@function _Z26cudaFindMaxOnDeviceNaivelyPii: # @_Z26cudaFindMaxOnDeviceNaivelyPii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z20findMaxNaivelyKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z26cudaFindMaxOnDeviceNaivelyPii, .Lfunc_end2-_Z26cudaFindMaxOnDeviceNaivelyPii .cfi_endproc # -- End function .globl _Z45__device_stub__findMaxWithoutDivergenceKernelPi # -- 
Begin function _Z45__device_stub__findMaxWithoutDivergenceKernelPi .p2align 4, 0x90 .type _Z45__device_stub__findMaxWithoutDivergenceKernelPi,@function _Z45__device_stub__findMaxWithoutDivergenceKernelPi: # @_Z45__device_stub__findMaxWithoutDivergenceKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z30findMaxWithoutDivergenceKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end3: .size _Z45__device_stub__findMaxWithoutDivergenceKernelPi, .Lfunc_end3-_Z45__device_stub__findMaxWithoutDivergenceKernelPi .cfi_endproc # -- End function .globl _Z28cudaFindMaxWithoutDivergencePii # -- Begin function _Z28cudaFindMaxWithoutDivergencePii .p2align 4, 0x90 .type _Z28cudaFindMaxWithoutDivergencePii,@function _Z28cudaFindMaxWithoutDivergencePii: # @_Z28cudaFindMaxWithoutDivergencePii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 
32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z30findMaxWithoutDivergenceKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z28cudaFindMaxWithoutDivergencePii, .Lfunc_end4-_Z28cudaFindMaxWithoutDivergencePii .cfi_endproc # -- End function .globl _Z44__device_stub__findMaxWithSharedMemoryKernelPi # -- Begin function _Z44__device_stub__findMaxWithSharedMemoryKernelPi .p2align 4, 0x90 .type _Z44__device_stub__findMaxWithSharedMemoryKernelPi,@function _Z44__device_stub__findMaxWithSharedMemoryKernelPi: # @_Z44__device_stub__findMaxWithSharedMemoryKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z29findMaxWithSharedMemoryKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end5: .size _Z44__device_stub__findMaxWithSharedMemoryKernelPi, .Lfunc_end5-_Z44__device_stub__findMaxWithSharedMemoryKernelPi .cfi_endproc # -- End function .globl _Z27cudaFindMaxWithSharedMemoryPii # -- Begin function _Z27cudaFindMaxWithSharedMemoryPii .p2align 4, 0x90 .type 
_Z27cudaFindMaxWithSharedMemoryPii,@function _Z27cudaFindMaxWithSharedMemoryPii: # @_Z27cudaFindMaxWithSharedMemoryPii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB6_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z29findMaxWithSharedMemoryKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB6_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size _Z27cudaFindMaxWithSharedMemoryPii, .Lfunc_end6-_Z27cudaFindMaxWithSharedMemoryPii .cfi_endproc # -- End function .globl _Z15initializeArrayi # -- Begin function _Z15initializeArrayi .p2align 4, 0x90 .type _Z15initializeArrayi,@function _Z15initializeArrayi: # @_Z15initializeArrayi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx 
.cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl %edi, %ebx movslq %edi, %r14 leaq (,%r14,4), %rdi callq malloc testl %r14d, %r14d jle .LBB7_3 # %bb.1: # %.lr.ph.preheader movl %ebx, %ecx addq %rcx, %rcx xorl %edx, %edx .p2align 4, 0x90 .LBB7_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 leal 1(%rdx), %esi movl %esi, (%rax,%rdx,2) addq $2, %rdx cmpq %rdx, %rcx jne .LBB7_2 .LBB7_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size _Z15initializeArrayi, .Lfunc_end7-_Z15initializeArrayi .cfi_endproc # -- End function .globl _Z11printTiming7timevalS_Pci # -- Begin function _Z11printTiming7timevalS_Pci .p2align 4, 0x90 .type _Z11printTiming7timevalS_Pci,@function _Z11printTiming7timevalS_Pci: # @_Z11printTiming7timevalS_Pci .cfi_startproc # %bb.0: movl %r9d, %edx subq %rsi, %rcx cvtsi2sd %rcx, %xmm0 movl $.L.str, %edi movq %r8, %rsi movb $1, %al jmp printf # TAILCALL .Lfunc_end8: .size _Z11printTiming7timevalS_Pci, .Lfunc_end8-_Z11printTiming7timevalS_Pci .cfi_endproc # -- End function .globl _Z19checkAndPrintResultiiPc # -- Begin function _Z19checkAndPrintResultiiPc .p2align 4, 0x90 .type _Z19checkAndPrintResultiiPc,@function _Z19checkAndPrintResultiiPc: # @_Z19checkAndPrintResultiiPc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movl %edi, %ebp cmpl %esi, %edi movl $.L.str.1, %eax movl $.L.str.2, %edi cmoveq %rax, %rdi movq %rdx, %rsi xorl %eax, %eax callq printf movl $.L.str.3, %edi movl %ebp, %esi movl %ebx, %edx xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end9: .size _Z19checkAndPrintResultiiPc, .Lfunc_end9-_Z19checkAndPrintResultiiPc .cfi_endproc # -- End function .globl 
_Z20launchTestWithTimingii # -- Begin function _Z20launchTestWithTimingii .p2align 4, 0x90 .type _Z20launchTestWithTimingii,@function _Z20launchTestWithTimingii: # @_Z20launchTestWithTimingii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $32, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movl %edi, %r15d movslq %esi, %r12 leaq (,%r12,4), %rdi callq malloc movq %rax, %r14 testl %r12d, %r12d jle .LBB10_3 # %bb.1: # %.lr.ph.preheader.i movl %ebx, %eax addq %rax, %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB10_2: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 leal 1(%rcx), %edx movl %edx, (%r14,%rcx,2) addq $2, %rcx cmpq %rcx, %rax jne .LBB10_2 .LBB10_3: # %_Z15initializeArrayi.exit movl -4(%r14,%r12,4), %ebp leaq 16(%rsp), %rdi xorl %esi, %esi callq gettimeofday cmpl $3, %r15d ja .LBB10_4 # %bb.5: # %_Z15initializeArrayi.exit movl %r15d, %eax jmpq *.LJTI10_0(,%rax,8) .LBB10_6: cmpl $2, %ebx jl .LBB10_13 # %bb.7: # %.preheader.lr.ph.i movl %ebx, %eax movl $1, %ecx jmp .LBB10_8 .p2align 4, 0x90 .LBB10_12: # %._crit_edge.i # in Loop: Header=BB10_8 Depth=1 cmpl %ebx, %ecx jge .LBB10_13 .LBB10_8: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB10_9 Depth 2 # kill: def $ecx killed $ecx def $rcx movslq %ecx, %rsi addl %ecx, %ecx movslq %ecx, %rdx leaq (%r14,%rsi,4), %rsi xorl %edi, %edi jmp .LBB10_9 .p2align 4, 0x90 .LBB10_11: # in Loop: Header=BB10_9 Depth=2 addq %rdx, %rdi cmpq %rax, %rdi jge .LBB10_12 .LBB10_9: # Parent Loop BB10_8 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rsi,%rdi,4), %r8d cmpl %r8d, (%r14,%rdi,4) jge .LBB10_11 # %bb.10: # in Loop: Header=BB10_9 Depth=2 movl %r8d, (%r14,%rdi,4) jmp .LBB10_11 .LBB10_15: movq %r14, %rdi movl %ebx, %esi callq _Z28cudaFindMaxWithoutDivergencePii 
movl %eax, %r12d movl $.L.str.6, %r15d jmp .LBB10_17 .LBB10_16: movq %r14, %rdi movl %ebx, %esi callq _Z27cudaFindMaxWithSharedMemoryPii movl %eax, %r12d movl $.L.str.7, %r15d jmp .LBB10_17 .LBB10_14: movq %r14, %rdi movl %ebx, %esi callq _Z26cudaFindMaxOnDeviceNaivelyPii movl %eax, %r12d movl $.L.str.5, %r15d jmp .LBB10_17 .LBB10_13: # %_Z13findMaxOnHostPii.exit movl (%r14), %r12d movl $.L.str.4, %r15d jmp .LBB10_17 .LBB10_4: movl $.L.str.8, %r15d # implicit-def: $r12d .LBB10_17: # %_Z19checkAndPrintResultiiPc.exit movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq %r14, %rdi callq free cmpl %r12d, %ebp movl $.L.str.1, %eax movl $.L.str.2, %edi cmoveq %rax, %rdi movq %r15, %rsi xorl %eax, %eax callq printf movl $.L.str.3, %edi movl %ebp, %esi movl %r12d, %edx xorl %eax, %eax callq printf movq 8(%rsp), %rax subq 24(%rsp), %rax cvtsi2sd %rax, %xmm0 movl $.L.str, %edi movq %r15, %rsi movl %ebx, %edx movb $1, %al addq $32, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end10: .size _Z20launchTestWithTimingii, .Lfunc_end10-_Z20launchTestWithTimingii .cfi_endproc .section .rodata,"a",@progbits .p2align 3, 0x0 .LJTI10_0: .quad .LBB10_6 .quad .LBB10_14 .quad .LBB10_15 .quad .LBB10_16 # -- End function .text .globl _Z7runTesti # -- Begin function _Z7runTesti .p2align 4, 0x90 .type _Z7runTesti,@function _Z7runTesti: # @_Z7runTesti .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %ebx xorl %edi, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $1, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $1, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $2, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $3, %edi movl %ebx, %esi popq %rbx .cfi_def_cfa_offset 8 jmp _Z20launchTestWithTimingii # TAILCALL .Lfunc_end11: .size 
_Z7runTesti, .Lfunc_end11-_Z7runTesti .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 xorl %edi, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $1, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $1, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $2, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $3, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii xorl %edi, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $1, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $1, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $2, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $3, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end12: .size main, .Lfunc_end12-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB13_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB13_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z20findMaxNaivelyKernelPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z30findMaxWithoutDivergenceKernelPi, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl 
$-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z29findMaxWithSharedMemoryKernelPi, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end13: .size __hip_module_ctor, .Lfunc_end13-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB14_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB14_2: retq .Lfunc_end14: .size __hip_module_dtor, .Lfunc_end14-__hip_module_dtor .cfi_endproc # -- End function .type _Z20findMaxNaivelyKernelPi,@object # @_Z20findMaxNaivelyKernelPi .section .rodata,"a",@progbits .globl _Z20findMaxNaivelyKernelPi .p2align 3, 0x0 _Z20findMaxNaivelyKernelPi: .quad _Z35__device_stub__findMaxNaivelyKernelPi .size _Z20findMaxNaivelyKernelPi, 8 .type _Z30findMaxWithoutDivergenceKernelPi,@object # @_Z30findMaxWithoutDivergenceKernelPi .globl _Z30findMaxWithoutDivergenceKernelPi .p2align 3, 0x0 _Z30findMaxWithoutDivergenceKernelPi: .quad _Z45__device_stub__findMaxWithoutDivergenceKernelPi .size _Z30findMaxWithoutDivergenceKernelPi, 8 .type _Z29findMaxWithSharedMemoryKernelPi,@object # @_Z29findMaxWithSharedMemoryKernelPi .globl _Z29findMaxWithSharedMemoryKernelPi .p2align 3, 0x0 _Z29findMaxWithSharedMemoryKernelPi: .quad _Z44__device_stub__findMaxWithSharedMemoryKernelPi .size _Z29findMaxWithSharedMemoryKernelPi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%s length %d: %g usec\n" .size .L.str, 23 .type .L.str.1,@object # 
@.str.1 .L.str.1: .asciz "%s passed check " .size .L.str.1, 17 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%s failed check " .size .L.str.2, 17 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "(expected %d, got %d)\n" .size .L.str.3, 23 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Serial" .size .L.str.4, 7 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Naive Parallel" .size .L.str.5, 15 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Non Divergent Parallel" .size .L.str.6, 23 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Shared Memory Parallel" .size .L.str.7, 23 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Unrecognized!" .size .L.str.8, 14 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z20findMaxNaivelyKernelPi" .size .L__unnamed_1, 27 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z30findMaxWithoutDivergenceKernelPi" .size .L__unnamed_2, 37 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z29findMaxWithSharedMemoryKernelPi" .size .L__unnamed_3, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z35__device_stub__findMaxNaivelyKernelPi .addrsig_sym _Z45__device_stub__findMaxWithoutDivergenceKernelPi .addrsig_sym 
_Z44__device_stub__findMaxWithSharedMemoryKernelPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z20findMaxNaivelyKernelPi .addrsig_sym _Z30findMaxWithoutDivergenceKernelPi .addrsig_sym _Z29findMaxWithSharedMemoryKernelPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z29findMaxWithSharedMemoryKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */ /* 0x000fe20000000800 */ /*0030*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0040*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 0x000fe20000000800 */ /*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e220000002100 */ /*0060*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */ /* 0x000fe2000f8e023f */ /*0070*/ BSSY B0, 0x100 ; /* 0x0000008000007945 */ /* 0x000fe20003800000 */ /*0080*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0090*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x000fe400078e00ff */ /*00a0*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */ /* 0x001fc800078e0209 */ /*00b0*/ IMAD.WIDE.U32 R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x040fe200078e0003 */ /*00c0*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06070 */ /*00d0*/ @P0 BRA 0xf0 ; /* 0x0000001000000947 */ /* 0x000fea0003800000 */ /*00e0*/ LDG.E R4, [R2.64] ; /* 0x0000000602047981 */ /* 0x000164000c1e1900 */ /*00f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0100*/ USHF.R.U32.HI UR4, URZ, 0x1, UR5 ; /* 0x000000013f047899 */ /* 0x000fe20008011605 */ /*0110*/ STS [R9.X4], R4 ; /* 0x0000000409007388 */ /* 0x0203ea0000004800 */ /*0120*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*0130*/ @!P0 BRA 0x230 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0140*/ IMAD.SHL.U32 R4, R9, 0x4, RZ ; /* 0x0000000409047824 */ /* 0x002fe200078e00ff */ /*0150*/ MOV R5, UR4 ; /* 
0x0000000400057c02 */ /* 0x000fc60008000f00 */ /*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0170*/ ISETP.GE.U32.AND P0, PT, R9, R5, PT ; /* 0x000000050900720c */ /* 0x000fca0003f06070 */ /*0180*/ BSSY B0, 0x200 ; /* 0x0000007000007945 */ /* 0x000ff00003800000 */ /*0190*/ @P0 BRA 0x1f0 ; /* 0x0000005000000947 */ /* 0x002fea0003800000 */ /*01a0*/ IMAD R6, R5, 0x4, R4 ; /* 0x0000000405067824 */ /* 0x000fe200078e0204 */ /*01b0*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000fea0000004800 */ /*01c0*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000e640000000800 */ /*01d0*/ ISETP.GE.AND P0, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x002fda0003f06270 */ /*01e0*/ @!P0 STS [R9.X4], R6 ; /* 0x0000000609008388 */ /* 0x0003e40000004800 */ /*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0200*/ SHF.R.U32.HI R5, RZ, 0x1, R5 ; /* 0x00000001ff057819 */ /* 0x000fc80000011605 */ /*0210*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f05270 */ /*0220*/ @P0 BRA 0x160 ; /* 0xffffff3000000947 */ /* 0x000fea000383ffff */ /*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x002fe20000010000 */ /*0240*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe40003f05270 */ /*0250*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */ /* 0x000fd60003f26070 */ /*0260*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff058424 */ /* 0x000fc800078e00ff */ /*0270*/ @!P0 IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x160] ; /* 0x0000580008048625 */ /* 0x000fe200078e0005 */ /*0280*/ @!P0 LDS R7, [RZ] ; /* 0x00000000ff078984 */ /* 0x000e680000000800 */ /*0290*/ @!P0 STG.E [R4.64], R7 ; /* 0x0000000704008986 */ /* 0x0023e8000c101906 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*02c0*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 
0x002fe40000000800 */ /*02d0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*02e0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*02f0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0300*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */ /* 0x000364000c1e1900 */ /*0310*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x004fe200000001ff */ /*0320*/ IADD3 R4, R0, UR4, RZ ; /* 0x0000000400047c10 */ /* 0x000fd2000fffe0ff */ /*0330*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0340*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */ /* 0x000ea2000c1e1900 */ /*0350*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*0360*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe4000bf25270 */ /*0370*/ ISETP.GE.AND P0, PT, R6, R5, PT ; /* 0x000000050600720c */ /* 0x024fda0003f06270 */ /*0380*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */ /* 0x0005e2000c101906 */ /*0390*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff068224 */ /* 0x000fe200078e0005 */ /*03a0*/ @P1 BRA 0x310 ; /* 0xffffff6000001947 */ /* 0x000fea000383ffff */ /*03b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*03c0*/ BRA 0x3c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z30findMaxWithoutDivergenceKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0030*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe40000000800 */ /*0040*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fe2000f8e023f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e660000002100 */ /*0060*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*0070*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*0080*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x001fe200000001ff */ /*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fe200078e0203 */ /*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fd00000000a00 */ /*00c0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0005 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00e0*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fca000bf06070 */ /*00f0*/ BSSY B0, 0x190 ; /* 0x0000009000007945 */ /* 0x000ff00003800000 */ /*0100*/ @P0 BRA 0x180 ; /* 0x0000007000000947 */ /* 0x001fea0003800000 */ /*0110*/ IADD3 R4, R0, UR4, RZ ; /* 0x0000000400047c10 */ /* 0x000fe2000fffe0ff */ /*0120*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0130*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */ 
/* 0x000ea6000c1e1900 */ /*0140*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0150*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */ /* 0x000ea4000c1e1900 */ /*0160*/ ISETP.GE.AND P0, PT, R6, R5, PT ; /* 0x000000050600720c */ /* 0x004fda0003f06270 */ /*0170*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */ /* 0x0001e4000c101906 */ /*0180*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0190*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fcc0008011604 */ /*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*01b0*/ @P0 BRA 0xd0 ; /* 0xffffff1000000947 */ /* 0x000fea000383ffff */ /*01c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z20findMaxNaivelyKernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */ /* 0x000fe200078e00ff */ /*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e260000002500 */ /*0030*/ IMAD R6, R6, c[0x0][0xc], RZ ; /* 0x0000030006067a24 */ /* 0x000fe200078e02ff */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e680000002100 */ /*0050*/ ISETP.GE.U32.AND P0, PT, R6, 0x2, PT ; /* 0x000000020600780c */ /* 0x000fda0003f06070 */ /*0060*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x003fe200078e0203 */ /*0090*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */ /* 0x000fe40000000000 */ /*00a0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fcc0000000a00 */ /*00b0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0005 */ /*00c0*/ UMOV UR5, UR4 ; /* 0x0000000400057c82 */ /* 0x000fe20008000000 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00e0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fca000800063f */ /*00f0*/ BSSY B0, 0x2c0 ; /* 0x000001c000007945 */ /* 0x000fe20003800000 */ /*0100*/ IMAD R9, RZ, RZ, -UR4 ; /* 0x80000004ff097e24 */ /* 0x000fe2000f8e02ff */ /*0110*/ ISETP.NE.U32.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe4000bf25070 */ /*0120*/ I2F.U32.RP R7, UR4 ; /* 0x0000000400077d06 */ /* 0x001e300008209000 */ /*0130*/ MUFU.RCP R7, R7 ; /* 0x0000000700077308 */ /* 0x001e240000001000 */ /*0140*/ IADD3 R4, R7, 0xffffffe, RZ ; /* 0x0ffffffe07047810 */ /* 0x001fcc0007ffe0ff */ /*0150*/ 
F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ /*0160*/ MOV R4, RZ ; /* 0x000000ff00047202 */ /* 0x001fe20000000f00 */ /*0170*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */ /* 0x002fc800078e02ff */ /*0180*/ IMAD.HI.U32 R5, R5, R9, R4 ; /* 0x0000000905057227 */ /* 0x000fcc00078e0004 */ /*0190*/ IMAD.HI.U32 R5, R5, R0, RZ ; /* 0x0000000005057227 */ /* 0x000fc800078e00ff */ /*01a0*/ IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff057224 */ /* 0x000fc800078e0a05 */ /*01b0*/ IMAD R5, R5, UR4, R0 ; /* 0x0000000405057c24 */ /* 0x000fca000f8e0200 */ /*01c0*/ ISETP.GE.U32.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */ /* 0x000fda000bf06070 */ /*01d0*/ @P0 IADD3 R5, R5, -UR4, RZ ; /* 0x8000000405050c10 */ /* 0x000fc8000fffe0ff */ /*01e0*/ ISETP.GE.U32.AND P0, PT, R5, UR4, PT ; /* 0x0000000405007c0c */ /* 0x000fda000bf06070 */ /*01f0*/ @P0 IADD3 R5, R5, -UR4, RZ ; /* 0x8000000405050c10 */ /* 0x000fe4000fffe0ff */ /*0200*/ @!P1 LOP3.LUT R5, RZ, UR4, RZ, 0x33, !PT ; /* 0x00000004ff059c12 */ /* 0x000fe4000f8e33ff */ /*0210*/ ISETP.LE.U32.AND P0, PT, R6, UR4, PT ; /* 0x0000000406007c0c */ /* 0x000fe4000bf03070 */ /*0220*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*0230*/ @P1 BRA 0x2b0 ; /* 0x0000007000001947 */ /* 0x000fea0003800000 */ /*0240*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0250*/ IADD3 R4, R0, UR5, RZ ; /* 0x0000000500047c10 */ /* 0x000fe2000fffe0ff */ /*0260*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */ /* 0x000eb0000c1e1900 */ /*0270*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0005 */ /*0280*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */ /* 0x000ea4000c1e1900 */ /*0290*/ ISETP.GE.AND P1, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f26270 */ /*02a0*/ @!P1 STG.E [R2.64], R4 ; /* 0x0000000402009986 */ /* 0x0001e4000c101906 */ /*02b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 
0x000fea0003800000 */ /*02c0*/ @!P0 BRA 0xc0 ; /* 0xfffffdf000008947 */ /* 0x000fea000383ffff */ /*02d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02e0*/ BRA 0x2e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z20findMaxNaivelyKernelPi .globl _Z20findMaxNaivelyKernelPi .p2align 8 .type _Z20findMaxNaivelyKernelPi,@function _Z20findMaxNaivelyKernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s2, s4, s3 s_cmp_lt_u32 s2, 2 s_cbranch_scc1 .LBB0_6 s_load_b64 s[0:1], s[0:1], 0x0 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] v_mov_b32_e32 v3, v1 s_mov_b32 s3, 1 v_lshlrev_b64 v[3:4], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_cmp_ge_u32 s3, s2 s_cbranch_scc1 .LBB0_6 .LBB0_3: s_mov_b32 s5, s3 s_lshl_b32 s3, s3, 1 s_delay_alu instid0(SALU_CYCLE_1) s_add_i32 s4, s3, -1 s_waitcnt_vscnt null, 0x0 s_barrier v_and_b32_e32 v0, s4, v2 s_mov_b32 s4, exec_lo buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_2 v_add_nc_u32_e32 v0, s5, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[0:1] v_add_co_u32 v5, vcc_lo, s0, v5 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v5, v[3:4], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v5, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_2 global_store_b32 v[3:4], v0, off s_branch .LBB0_2 .LBB0_6: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z20findMaxNaivelyKernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 
.amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z20findMaxNaivelyKernelPi, .Lfunc_end0-_Z20findMaxNaivelyKernelPi .section .AMDGPU.csdata,"",@progbits .text .protected _Z30findMaxWithoutDivergenceKernelPi .globl _Z30findMaxWithoutDivergenceKernelPi .p2align 8 .type _Z30findMaxWithoutDivergenceKernelPi,@function _Z30findMaxWithoutDivergenceKernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s2, s4, s3 s_cmp_lt_u32 s2, 2 s_cbranch_scc1 .LBB1_6 s_load_b64 s[0:1], s[0:1], 0x0 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] v_mov_b32_e32 v3, v1 v_lshlrev_b64 v[3:4], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB1_3 .p2align 6 .LBB1_2: s_or_b32 exec_lo, exec_lo, s4 s_cmp_lt_u32 s3, 4 s_cbranch_scc1 .LBB1_6 .LBB1_3: s_mov_b32 s3, s2 s_lshr_b32 s2, s2, 1 s_mov_b32 s4, exec_lo s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv v_cmpx_gt_u32_e64 s2, v2 s_cbranch_execz .LBB1_2 v_add_nc_u32_e32 v0, s2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[0:1] v_add_co_u32 v5, vcc_lo, s0, v5 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v5, v[3:4], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v5, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB1_2 global_store_b32 v[3:4], v0, off s_branch .LBB1_2 .LBB1_6: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z30findMaxWithoutDivergenceKernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 
.amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z30findMaxWithoutDivergenceKernelPi, .Lfunc_end1-_Z30findMaxWithoutDivergenceKernelPi .section .AMDGPU.csdata,"",@progbits .text .protected _Z29findMaxWithSharedMemoryKernelPi .globl _Z29findMaxWithSharedMemoryKernelPi .p2align 8 .type _Z29findMaxWithSharedMemoryKernelPi,@function _Z29findMaxWithSharedMemoryKernelPi: s_clause 0x2 s_load_b32 s3, s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x8 s_load_b64 s[0:1], s[0:1], 0x0 s_mov_b32 s2, s15 v_mov_b32_e32 v3, 0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1] s_mul_i32 s5, s4, s3 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s5, v1 s_and_saveexec_b32 s5, vcc_lo s_cbranch_execz .LBB2_2 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_load_b32 v3, v[2:3], off .LBB2_2: s_or_b32 exec_lo, exec_lo, s5 v_lshlrev_b32_e32 v2, 2, v0 s_cmp_lt_u32 s3, 2 s_waitcnt vmcnt(0) ds_store_b32 v2, v3 s_cbranch_scc0 .LBB2_8 .LBB2_3: s_mov_b32 s3, 0 s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB2_5 v_mov_b32_e32 v0, 0 s_lshl_b64 s[2:3], s[2:3], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s2, s0, s2 s_addc_u32 s3, s1, s3 ds_load_b32 v2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v2, s[2:3] .LBB2_5: s_or_b32 exec_lo, exec_lo, s5 v_max_u32_e32 v0, 1, v1 s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_mov_b32 s2, exec_lo 
v_cmpx_gt_u32_e64 s4, v0 s_cbranch_execz .LBB2_14 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v2, v3 v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo global_load_b32 v0, v[4:5], off s_branch .LBB2_12 .p2align 6 .LBB2_7: s_or_b32 exec_lo, exec_lo, s6 s_cmp_lt_u32 s5, 4 s_cbranch_scc1 .LBB2_3 .LBB2_8: s_mov_b32 s5, s3 s_lshr_b32 s3, s3, 1 s_mov_b32 s6, exec_lo s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_gt_u32_e64 s3, v0 s_cbranch_execz .LBB2_7 v_add_lshl_u32 v3, s3, v0, 2 ds_load_b32 v3, v3 ds_load_b32 v4, v2 s_waitcnt lgkmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v4, v3 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB2_7 ds_store_b32 v2, v3 s_branch .LBB2_7 .p2align 6 .LBB2_11: s_or_b32 exec_lo, exec_lo, s3 s_cmp_gt_u32 s4, 3 s_mov_b32 s4, s2 s_cbranch_scc0 .LBB2_14 .LBB2_12: s_lshr_b32 s2, s4, 1 s_mov_b32 s3, exec_lo v_add_nc_u32_e32 v2, s2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[2:3] v_add_co_u32 v6, vcc_lo, s0, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo global_load_b32 v2, v[6:7], off s_waitcnt vmcnt(0) v_cmpx_lt_i32_e64 v0, v2 s_cbranch_execz .LBB2_11 v_mov_b32_e32 v0, v2 global_store_b32 v[4:5], v2, off s_branch .LBB2_11 .LBB2_14: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z29findMaxWithSharedMemoryKernelPi .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z29findMaxWithSharedMemoryKernelPi, .Lfunc_end2-_Z29findMaxWithSharedMemoryKernelPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z20findMaxNaivelyKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z20findMaxNaivelyKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z30findMaxWithoutDivergenceKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z30findMaxWithoutDivergenceKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: 
global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z29findMaxWithSharedMemoryKernelPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z29findMaxWithSharedMemoryKernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000ea68b_00000000-6_reduction.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2069: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2069: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z13findMaxOnHostPii .type _Z13findMaxOnHostPii, @function _Z13findMaxOnHostPii: .LFB2057: .cfi_startproc endbr64 movl $1, %r8d cmpl $1, %esi jg .L4 .L5: movl (%rdi), %eax ret .L6: addl %r8d, %edx addq %r10, %rax cmpl %edx, %esi jle .L11 .L7: movl (%rax,%r9,4), %ecx cmpl %ecx, (%rax) jge .L6 movl %ecx, (%rax) jmp .L6 .L11: cmpl %esi, %r8d jge .L5 .L4: movl %r8d, %r9d addl %r8d, %r8d movslq %r8d, %r10 salq $2, %r10 movq %rdi, %rax movl $0, %edx movslq %r9d, %r9 jmp .L7 .cfi_endproc .LFE2057: .size _Z13findMaxOnHostPii, .-_Z13findMaxOnHostPii .globl _Z15initializeArrayi .type _Z15initializeArrayi, @function _Z15initializeArrayi: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx movslq %edi, %rdi salq $2, %rdi call malloc@PLT testl %ebx, %ebx jle .L12 movq %rax, %rcx leal 1(%rbx,%rbx), %esi movl $1, %edx .L14: movl %edx, (%rcx) addl $2, %edx addq $4, %rcx cmpl %esi, %edx jne .L14 .L12: popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z15initializeArrayi, .-_Z15initializeArrayi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%s length %d: %g usec\n" .text .globl _Z11printTiming7timevalS_Pci .type _Z11printTiming7timevalS_Pci, @function _Z11printTiming7timevalS_Pci: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq %rcx, %rax movq %r8, %rdx movl %r9d, %ecx subq %rsi, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2062: .size _Z11printTiming7timevalS_Pci, .-_Z11printTiming7timevalS_Pci .section .rodata.str1.1 .LC1: .string "%s passed check " .LC2: .string "%s failed check " .LC3: .string "(expected %d, got %d)\n" .text .globl _Z19checkAndPrintResultiiPc .type _Z19checkAndPrintResultiiPc, @function _Z19checkAndPrintResultiiPc: .LFB2063: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl %edi, %ebx movl %esi, %ebp cmpl %esi, %edi je .L23 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L21: movl %ebp, %ecx movl %ebx, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L21 .cfi_endproc .LFE2063: .size _Z19checkAndPrintResultiiPc, .-_Z19checkAndPrintResultiiPc .globl _Z40__device_stub__Z20findMaxNaivelyKernelPiPi .type _Z40__device_stub__Z20findMaxNaivelyKernelPiPi, @function _Z40__device_stub__Z20findMaxNaivelyKernelPiPi: .LFB2091: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 88(%rsp), %rax subq %fs:40, %rax jne .L29 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq 
_Z20findMaxNaivelyKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2091: .size _Z40__device_stub__Z20findMaxNaivelyKernelPiPi, .-_Z40__device_stub__Z20findMaxNaivelyKernelPiPi .globl _Z20findMaxNaivelyKernelPi .type _Z20findMaxNaivelyKernelPi, @function _Z20findMaxNaivelyKernelPi: .LFB2092: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z20findMaxNaivelyKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _Z20findMaxNaivelyKernelPi, .-_Z20findMaxNaivelyKernelPi .globl _Z26cudaFindMaxOnDeviceNaivelyPii .type _Z26cudaFindMaxOnDeviceNaivelyPii, @function _Z26cudaFindMaxOnDeviceNaivelyPii: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L33: leaq 4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L37 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movq 8(%rsp), %rdi call 
_Z40__device_stub__Z20findMaxNaivelyKernelPiPi jmp .L33 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z26cudaFindMaxOnDeviceNaivelyPii, .-_Z26cudaFindMaxOnDeviceNaivelyPii .globl _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi .type _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi, @function _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi: .LFB2093: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L42 .L38: movq 88(%rsp), %rax subq %fs:40, %rax jne .L43 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L42: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z30findMaxWithoutDivergenceKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L38 .L43: call __stack_chk_fail@PLT .cfi_endproc .LFE2093: .size _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi, .-_Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi .globl _Z30findMaxWithoutDivergenceKernelPi .type _Z30findMaxWithoutDivergenceKernelPi, @function _Z30findMaxWithoutDivergenceKernelPi: .LFB2094: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2094: .size _Z30findMaxWithoutDivergenceKernelPi, .-_Z30findMaxWithoutDivergenceKernelPi .globl _Z28cudaFindMaxWithoutDivergencePii .type _Z28cudaFindMaxWithoutDivergencePii, @function _Z28cudaFindMaxWithoutDivergencePii: .LFB2059: 
.cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L47: leaq 4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L51 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L50: .cfi_restore_state movq 8(%rsp), %rdi call _Z50__device_stub__Z30findMaxWithoutDivergenceKernelPiPi jmp .L47 .L51: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size _Z28cudaFindMaxWithoutDivergencePii, .-_Z28cudaFindMaxWithoutDivergencePii .globl _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi .type _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi, @function _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi: .LFB2095: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT 
testl %eax, %eax je .L56 .L52: movq 88(%rsp), %rax subq %fs:40, %rax jne .L57 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z29findMaxWithSharedMemoryKernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L52 .L57: call __stack_chk_fail@PLT .cfi_endproc .LFE2095: .size _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi, .-_Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi .globl _Z29findMaxWithSharedMemoryKernelPi .type _Z29findMaxWithSharedMemoryKernelPi, @function _Z29findMaxWithSharedMemoryKernelPi: .LFB2096: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2096: .size _Z29findMaxWithSharedMemoryKernelPi, .-_Z29findMaxWithSharedMemoryKernelPi .globl _Z27cudaFindMaxWithSharedMemoryPii .type _Z27cudaFindMaxWithSharedMemoryPii, @function _Z27cudaFindMaxWithSharedMemoryPii: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leal 0(,%rsi,4), %ebp movslq %ebp, %rbp leaq 8(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) leal 511(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $9, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L64 .L61: leaq 
4(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movl 4(%rsp), %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L65 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L64: .cfi_restore_state movq 8(%rsp), %rdi call _Z49__device_stub__Z29findMaxWithSharedMemoryKernelPiPi jmp .L61 .L65: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size _Z27cudaFindMaxWithSharedMemoryPii, .-_Z27cudaFindMaxWithSharedMemoryPii .section .rodata.str1.1 .LC4: .string "Serial" .LC5: .string "Naive Parallel" .LC6: .string "Non Divergent Parallel" .LC7: .string "Shared Memory Parallel" .LC8: .string "Unrecognized!" .text .globl _Z20launchTestWithTimingii .type _Z20launchTestWithTimingii, @function _Z20launchTestWithTimingii: .LFB2064: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movl %edi, %ebp movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl %esi, %edi call _Z15initializeArrayi movq %rax, %r12 movslq %ebx, %rax movl -4(%r12,%rax,4), %r14d movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT cmpl $2, %ebp je .L67 jg .L68 testl %ebp, %ebp je .L69 cmpl $1, %ebp jne .L75 movl %ebx, %esi movq %r12, %rdi call _Z26cudaFindMaxOnDeviceNaivelyPii movl %eax, %r13d leaq .LC5(%rip), %rbp jmp .L71 .L75: leaq .LC8(%rip), %rbp jmp .L71 .L68: cmpl $3, %ebp jne .L76 movl %ebx, %esi movq %r12, %rdi call _Z27cudaFindMaxWithSharedMemoryPii movl %eax, %r13d leaq .LC7(%rip), %rbp jmp .L71 .L76: leaq .LC8(%rip), %rbp jmp .L71 .L69: movl %ebx, %esi movq %r12, %rdi call _Z13findMaxOnHostPii movl %eax, %r13d leaq .LC4(%rip), %rbp .L71: 
leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdx movl %r13d, %esi movl %r14d, %edi call _Z19checkAndPrintResultiiPc movl %ebx, %r9d movq %rbp, %r8 movq 16(%rsp), %rdx movq 24(%rsp), %rcx movq (%rsp), %rdi movq 8(%rsp), %rsi call _Z11printTiming7timevalS_Pci movq 40(%rsp), %rax subq %fs:40, %rax jne .L77 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L67: .cfi_restore_state movl %ebx, %esi movq %r12, %rdi call _Z28cudaFindMaxWithoutDivergencePii movl %eax, %r13d leaq .LC6(%rip), %rbp jmp .L71 .L77: call __stack_chk_fail@PLT .cfi_endproc .LFE2064: .size _Z20launchTestWithTimingii, .-_Z20launchTestWithTimingii .globl _Z7runTesti .type _Z7runTesti, @function _Z7runTesti: .LFB2065: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx movl %edi, %esi movl $0, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $1, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $1, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $2, %edi call _Z20launchTestWithTimingii movl %ebx, %esi movl $3, %edi call _Z20launchTestWithTimingii popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2065: .size _Z7runTesti, .-_Z7runTesti .globl main .type main, @function main: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl $1024, %edi call _Z7runTesti movl $4096, %edi call _Z7runTesti movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC9: .string "_Z29findMaxWithSharedMemoryKernelPi" .align 8 .LC10: .string "_Z30findMaxWithoutDivergenceKernelPi" .section .rodata.str1.1 .LC11: .string "_Z20findMaxNaivelyKernelPi" .text .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2098: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z29findMaxWithSharedMemoryKernelPi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z30findMaxWithoutDivergenceKernelPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z20findMaxNaivelyKernelPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2098: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f 
.long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "reduction.hip" .globl _Z13findMaxOnHostPii # -- Begin function _Z13findMaxOnHostPii .p2align 4, 0x90 .type _Z13findMaxOnHostPii,@function _Z13findMaxOnHostPii: # @_Z13findMaxOnHostPii .cfi_startproc # %bb.0: cmpl $2, %esi jl .LBB0_7 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl $1, %ecx jmp .LBB0_2 .p2align 4, 0x90 .LBB0_6: # %._crit_edge # in Loop: Header=BB0_2 Depth=1 cmpl %esi, %ecx jge .LBB0_7 .LBB0_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 # kill: def $ecx killed $ecx def $rcx movslq %ecx, %r8 addl %ecx, %ecx movslq %ecx, %rdx leaq (%rdi,%r8,4), %r8 xorl %r9d, %r9d jmp .LBB0_3 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_3 Depth=2 addq %rdx, %r9 cmpq %rax, %r9 jge .LBB0_6 .LBB0_3: # Parent Loop BB0_2 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r8,%r9,4), %r10d cmpl %r10d, (%rdi,%r9,4) jge .LBB0_5 # %bb.4: # in Loop: Header=BB0_3 Depth=2 movl %r10d, (%rdi,%r9,4) jmp .LBB0_5 .LBB0_7: # %._crit_edge21 movl (%rdi), %eax retq .Lfunc_end0: .size _Z13findMaxOnHostPii, .Lfunc_end0-_Z13findMaxOnHostPii .cfi_endproc # -- End function .globl _Z35__device_stub__findMaxNaivelyKernelPi # -- Begin function _Z35__device_stub__findMaxNaivelyKernelPi .p2align 4, 0x90 .type _Z35__device_stub__findMaxNaivelyKernelPi,@function _Z35__device_stub__findMaxNaivelyKernelPi: # @_Z35__device_stub__findMaxNaivelyKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z20findMaxNaivelyKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end1: .size _Z35__device_stub__findMaxNaivelyKernelPi, 
.Lfunc_end1-_Z35__device_stub__findMaxNaivelyKernelPi .cfi_endproc # -- End function .globl _Z26cudaFindMaxOnDeviceNaivelyPii # -- Begin function _Z26cudaFindMaxOnDeviceNaivelyPii .p2align 4, 0x90 .type _Z26cudaFindMaxOnDeviceNaivelyPii,@function _Z26cudaFindMaxOnDeviceNaivelyPii: # @_Z26cudaFindMaxOnDeviceNaivelyPii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z20findMaxNaivelyKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z26cudaFindMaxOnDeviceNaivelyPii, .Lfunc_end2-_Z26cudaFindMaxOnDeviceNaivelyPii .cfi_endproc # -- End function .globl _Z45__device_stub__findMaxWithoutDivergenceKernelPi # -- 
Begin function _Z45__device_stub__findMaxWithoutDivergenceKernelPi .p2align 4, 0x90 .type _Z45__device_stub__findMaxWithoutDivergenceKernelPi,@function _Z45__device_stub__findMaxWithoutDivergenceKernelPi: # @_Z45__device_stub__findMaxWithoutDivergenceKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z30findMaxWithoutDivergenceKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end3: .size _Z45__device_stub__findMaxWithoutDivergenceKernelPi, .Lfunc_end3-_Z45__device_stub__findMaxWithoutDivergenceKernelPi .cfi_endproc # -- End function .globl _Z28cudaFindMaxWithoutDivergencePii # -- Begin function _Z28cudaFindMaxWithoutDivergencePii .p2align 4, 0x90 .type _Z28cudaFindMaxWithoutDivergencePii,@function _Z28cudaFindMaxWithoutDivergencePii: # @_Z28cudaFindMaxWithoutDivergencePii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 
32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z30findMaxWithoutDivergenceKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z28cudaFindMaxWithoutDivergencePii, .Lfunc_end4-_Z28cudaFindMaxWithoutDivergencePii .cfi_endproc # -- End function .globl _Z44__device_stub__findMaxWithSharedMemoryKernelPi # -- Begin function _Z44__device_stub__findMaxWithSharedMemoryKernelPi .p2align 4, 0x90 .type _Z44__device_stub__findMaxWithSharedMemoryKernelPi,@function _Z44__device_stub__findMaxWithSharedMemoryKernelPi: # @_Z44__device_stub__findMaxWithSharedMemoryKernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z29findMaxWithSharedMemoryKernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end5: .size _Z44__device_stub__findMaxWithSharedMemoryKernelPi, .Lfunc_end5-_Z44__device_stub__findMaxWithSharedMemoryKernelPi .cfi_endproc # -- End function .globl _Z27cudaFindMaxWithSharedMemoryPii # -- Begin function _Z27cudaFindMaxWithSharedMemoryPii .p2align 4, 0x90 .type 
_Z27cudaFindMaxWithSharedMemoryPii,@function _Z27cudaFindMaxWithSharedMemoryPii: # @_Z27cudaFindMaxWithSharedMemoryPii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $80, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 leal (,%rbx,4), %eax movslq %eax, %r15 leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %r14, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy leal 511(%rbx), %edi testl %ebx, %ebx cmovnsl %ebx, %edi sarl $9, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB6_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) leaq 72(%rsp), %rax movq %rax, 32(%rsp) leaq 16(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z29findMaxWithSharedMemoryKernelPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB6_2: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movl 16(%rsp), %eax addq $80, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size _Z27cudaFindMaxWithSharedMemoryPii, .Lfunc_end6-_Z27cudaFindMaxWithSharedMemoryPii .cfi_endproc # -- End function .globl _Z15initializeArrayi # -- Begin function _Z15initializeArrayi .p2align 4, 0x90 .type _Z15initializeArrayi,@function _Z15initializeArrayi: # @_Z15initializeArrayi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx 
.cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl %edi, %ebx movslq %edi, %r14 leaq (,%r14,4), %rdi callq malloc testl %r14d, %r14d jle .LBB7_3 # %bb.1: # %.lr.ph.preheader movl %ebx, %ecx addq %rcx, %rcx xorl %edx, %edx .p2align 4, 0x90 .LBB7_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 leal 1(%rdx), %esi movl %esi, (%rax,%rdx,2) addq $2, %rdx cmpq %rdx, %rcx jne .LBB7_2 .LBB7_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size _Z15initializeArrayi, .Lfunc_end7-_Z15initializeArrayi .cfi_endproc # -- End function .globl _Z11printTiming7timevalS_Pci # -- Begin function _Z11printTiming7timevalS_Pci .p2align 4, 0x90 .type _Z11printTiming7timevalS_Pci,@function _Z11printTiming7timevalS_Pci: # @_Z11printTiming7timevalS_Pci .cfi_startproc # %bb.0: movl %r9d, %edx subq %rsi, %rcx cvtsi2sd %rcx, %xmm0 movl $.L.str, %edi movq %r8, %rsi movb $1, %al jmp printf # TAILCALL .Lfunc_end8: .size _Z11printTiming7timevalS_Pci, .Lfunc_end8-_Z11printTiming7timevalS_Pci .cfi_endproc # -- End function .globl _Z19checkAndPrintResultiiPc # -- Begin function _Z19checkAndPrintResultiiPc .p2align 4, 0x90 .type _Z19checkAndPrintResultiiPc,@function _Z19checkAndPrintResultiiPc: # @_Z19checkAndPrintResultiiPc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movl %edi, %ebp cmpl %esi, %edi movl $.L.str.1, %eax movl $.L.str.2, %edi cmoveq %rax, %rdi movq %rdx, %rsi xorl %eax, %eax callq printf movl $.L.str.3, %edi movl %ebp, %esi movl %ebx, %edx xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end9: .size _Z19checkAndPrintResultiiPc, .Lfunc_end9-_Z19checkAndPrintResultiiPc .cfi_endproc # -- End function .globl 
_Z20launchTestWithTimingii # -- Begin function _Z20launchTestWithTimingii .p2align 4, 0x90 .type _Z20launchTestWithTimingii,@function _Z20launchTestWithTimingii: # @_Z20launchTestWithTimingii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $32, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movl %edi, %r15d movslq %esi, %r12 leaq (,%r12,4), %rdi callq malloc movq %rax, %r14 testl %r12d, %r12d jle .LBB10_3 # %bb.1: # %.lr.ph.preheader.i movl %ebx, %eax addq %rax, %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB10_2: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 leal 1(%rcx), %edx movl %edx, (%r14,%rcx,2) addq $2, %rcx cmpq %rcx, %rax jne .LBB10_2 .LBB10_3: # %_Z15initializeArrayi.exit movl -4(%r14,%r12,4), %ebp leaq 16(%rsp), %rdi xorl %esi, %esi callq gettimeofday cmpl $3, %r15d ja .LBB10_4 # %bb.5: # %_Z15initializeArrayi.exit movl %r15d, %eax jmpq *.LJTI10_0(,%rax,8) .LBB10_6: cmpl $2, %ebx jl .LBB10_13 # %bb.7: # %.preheader.lr.ph.i movl %ebx, %eax movl $1, %ecx jmp .LBB10_8 .p2align 4, 0x90 .LBB10_12: # %._crit_edge.i # in Loop: Header=BB10_8 Depth=1 cmpl %ebx, %ecx jge .LBB10_13 .LBB10_8: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB10_9 Depth 2 # kill: def $ecx killed $ecx def $rcx movslq %ecx, %rsi addl %ecx, %ecx movslq %ecx, %rdx leaq (%r14,%rsi,4), %rsi xorl %edi, %edi jmp .LBB10_9 .p2align 4, 0x90 .LBB10_11: # in Loop: Header=BB10_9 Depth=2 addq %rdx, %rdi cmpq %rax, %rdi jge .LBB10_12 .LBB10_9: # Parent Loop BB10_8 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rsi,%rdi,4), %r8d cmpl %r8d, (%r14,%rdi,4) jge .LBB10_11 # %bb.10: # in Loop: Header=BB10_9 Depth=2 movl %r8d, (%r14,%rdi,4) jmp .LBB10_11 .LBB10_15: movq %r14, %rdi movl %ebx, %esi callq _Z28cudaFindMaxWithoutDivergencePii 
movl %eax, %r12d movl $.L.str.6, %r15d jmp .LBB10_17 .LBB10_16: movq %r14, %rdi movl %ebx, %esi callq _Z27cudaFindMaxWithSharedMemoryPii movl %eax, %r12d movl $.L.str.7, %r15d jmp .LBB10_17 .LBB10_14: movq %r14, %rdi movl %ebx, %esi callq _Z26cudaFindMaxOnDeviceNaivelyPii movl %eax, %r12d movl $.L.str.5, %r15d jmp .LBB10_17 .LBB10_13: # %_Z13findMaxOnHostPii.exit movl (%r14), %r12d movl $.L.str.4, %r15d jmp .LBB10_17 .LBB10_4: movl $.L.str.8, %r15d # implicit-def: $r12d .LBB10_17: # %_Z19checkAndPrintResultiiPc.exit movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq %r14, %rdi callq free cmpl %r12d, %ebp movl $.L.str.1, %eax movl $.L.str.2, %edi cmoveq %rax, %rdi movq %r15, %rsi xorl %eax, %eax callq printf movl $.L.str.3, %edi movl %ebp, %esi movl %r12d, %edx xorl %eax, %eax callq printf movq 8(%rsp), %rax subq 24(%rsp), %rax cvtsi2sd %rax, %xmm0 movl $.L.str, %edi movq %r15, %rsi movl %ebx, %edx movb $1, %al addq $32, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end10: .size _Z20launchTestWithTimingii, .Lfunc_end10-_Z20launchTestWithTimingii .cfi_endproc .section .rodata,"a",@progbits .p2align 3, 0x0 .LJTI10_0: .quad .LBB10_6 .quad .LBB10_14 .quad .LBB10_15 .quad .LBB10_16 # -- End function .text .globl _Z7runTesti # -- Begin function _Z7runTesti .p2align 4, 0x90 .type _Z7runTesti,@function _Z7runTesti: # @_Z7runTesti .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %ebx xorl %edi, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $1, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $1, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $2, %edi movl %ebx, %esi callq _Z20launchTestWithTimingii movl $3, %edi movl %ebx, %esi popq %rbx .cfi_def_cfa_offset 8 jmp _Z20launchTestWithTimingii # TAILCALL .Lfunc_end11: .size 
_Z7runTesti, .Lfunc_end11-_Z7runTesti .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 xorl %edi, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $1, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $1, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $2, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii movl $3, %edi movl $1024, %esi # imm = 0x400 callq _Z20launchTestWithTimingii xorl %edi, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $1, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $1, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $2, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii movl $3, %edi movl $4096, %esi # imm = 0x1000 callq _Z20launchTestWithTimingii xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end12: .size main, .Lfunc_end12-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB13_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB13_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z20findMaxNaivelyKernelPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z30findMaxWithoutDivergenceKernelPi, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl 
$-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z29findMaxWithSharedMemoryKernelPi, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end13: .size __hip_module_ctor, .Lfunc_end13-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB14_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB14_2: retq .Lfunc_end14: .size __hip_module_dtor, .Lfunc_end14-__hip_module_dtor .cfi_endproc # -- End function .type _Z20findMaxNaivelyKernelPi,@object # @_Z20findMaxNaivelyKernelPi .section .rodata,"a",@progbits .globl _Z20findMaxNaivelyKernelPi .p2align 3, 0x0 _Z20findMaxNaivelyKernelPi: .quad _Z35__device_stub__findMaxNaivelyKernelPi .size _Z20findMaxNaivelyKernelPi, 8 .type _Z30findMaxWithoutDivergenceKernelPi,@object # @_Z30findMaxWithoutDivergenceKernelPi .globl _Z30findMaxWithoutDivergenceKernelPi .p2align 3, 0x0 _Z30findMaxWithoutDivergenceKernelPi: .quad _Z45__device_stub__findMaxWithoutDivergenceKernelPi .size _Z30findMaxWithoutDivergenceKernelPi, 8 .type _Z29findMaxWithSharedMemoryKernelPi,@object # @_Z29findMaxWithSharedMemoryKernelPi .globl _Z29findMaxWithSharedMemoryKernelPi .p2align 3, 0x0 _Z29findMaxWithSharedMemoryKernelPi: .quad _Z44__device_stub__findMaxWithSharedMemoryKernelPi .size _Z29findMaxWithSharedMemoryKernelPi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%s length %d: %g usec\n" .size .L.str, 23 .type .L.str.1,@object # 
@.str.1 .L.str.1: .asciz "%s passed check " .size .L.str.1, 17 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%s failed check " .size .L.str.2, 17 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "(expected %d, got %d)\n" .size .L.str.3, 23 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Serial" .size .L.str.4, 7 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Naive Parallel" .size .L.str.5, 15 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Non Divergent Parallel" .size .L.str.6, 23 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Shared Memory Parallel" .size .L.str.7, 23 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Unrecognized!" .size .L.str.8, 14 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z20findMaxNaivelyKernelPi" .size .L__unnamed_1, 27 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z30findMaxWithoutDivergenceKernelPi" .size .L__unnamed_2, 37 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z29findMaxWithSharedMemoryKernelPi" .size .L__unnamed_3, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z35__device_stub__findMaxNaivelyKernelPi .addrsig_sym _Z45__device_stub__findMaxWithoutDivergenceKernelPi .addrsig_sym 
_Z44__device_stub__findMaxWithSharedMemoryKernelPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z20findMaxNaivelyKernelPi .addrsig_sym _Z30findMaxWithoutDivergenceKernelPi .addrsig_sym _Z29findMaxWithSharedMemoryKernelPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

// Add your kernel here
// Single-thread device kernel: reads one int through *a and one through *b
// and stores their sum through *c. All three pointers must point to device
// memory; the launch below uses <<<1,1>>>, so exactly one thread runs this.
__global__ void add(int *a, int *b, int *c)
{
	*c = *a + *b;
}

// main
// Host driver: allocates three device ints, uploads a and b, launches the
// kernel, downloads the result into c, frees device memory, and prints the
// sum. Returns 0 unconditionally.
// NOTE(review): every cudaMalloc/cudaMemcpy/cudaFree return code is ignored,
// so an allocation or copy failure would go unnoticed — acceptable for a
// teaching demo, but confirm before reusing this pattern.
int main(void)
{
	int a, b, c;            // host-side operands and result
	int *d_a, *d_b, *d_c;   // device-side copies of a, b, c
	int size = sizeof(int); // bytes per transfer (one int)

	// Allocate memory in Device
	cudaMalloc ((void **) &d_a, size);
	cudaMalloc ((void **) &d_b, size);
	cudaMalloc ((void **) &d_c, size);

	// Initialize value
	a = 2;
	b = 7;

	// Copy data from Host to Device
	cudaMemcpy (d_a, &a, size, cudaMemcpyHostToDevice);
	cudaMemcpy (d_b, &b, size, cudaMemcpyHostToDevice);

	// Execute
	// One block of one thread is enough: the kernel handles a single element.
	add<<<1,1>>>(d_a, d_b, d_c);

	// Copy result back to Host
	// Take note that it will be smart enough to wait
	// until the task at device completed
	// (cudaMemcpy on the default stream blocks until the kernel finishes,
	// so no explicit cudaDeviceSynchronize is needed here.)
	cudaMemcpy (&c, d_c, size, cudaMemcpyDeviceToHost);

	// Clean up
	cudaFree (d_a);
	cudaFree (d_b);
	cudaFree (d_c);

	printf("Task Completed: c = %d + %d = %d\n" ,a, b, c);
	return 0;
}
code for sm_80 Function : _Z3addPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */ /* 0x000fe200078e00ff */ /*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */ /* 0x000fe20000000f00 */ /*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */ /* 0x000fe200078e00ff */ /*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */ /* 0x000fe40000000f00 */ /*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda_runtime_api.h> #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> // Add your kernel here __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } // main int main(void) { int a, b, c; int *d_a, *d_b, *d_c; int size = sizeof(int); // Allocate memory in Device cudaMalloc ((void **) &d_a, size); cudaMalloc ((void **) &d_b, size); cudaMalloc ((void **) &d_c, size); // Initialize value a = 2; b = 7; // Copy data from Host to Device cudaMemcpy (d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy (d_b, &b, size, cudaMemcpyHostToDevice); // Execute add<<<1,1>>>(d_a, d_b, d_c); // Copy result back to Host // Take note that it will be smart enough to wait // until the task at device completed cudaMemcpy (&c, d_c, size, cudaMemcpyDeviceToHost); // Clean up cudaFree (d_a); cudaFree (d_b); cudaFree (d_c); printf("Task Completed: c = %d + %d = %d\n" ,a, b, c); return 0; }
.file "tmpxft_00126b24_00000000-6_VectorAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z3addPiS_S_PiS_S_ .type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function _Z26__device_stub__Z3addPiS_S_PiS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z3addPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_ .globl _Z3addPiS_S_ .type _Z3addPiS_S_, @function _Z3addPiS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z3addPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z3addPiS_S_, .-_Z3addPiS_S_ .section 
.rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Task Completed: c = %d + %d = %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl $2, (%rsp) movl $7, 4(%rsp) movq %rsp, %rsi movl $1, %ecx movl $4, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT leaq 4(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: leaq 44(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 24(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movl 44(%rsp), %r8d movl 4(%rsp), %ecx movl (%rsp), %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z3addPiS_S_PiS_S_ jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z3addPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z3addPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda_runtime_api.h> #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> // Add your kernel here __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } // main int main(void) { int a, b, c; int *d_a, *d_b, *d_c; int size = sizeof(int); // Allocate memory in Device cudaMalloc ((void **) &d_a, size); cudaMalloc ((void **) &d_b, size); cudaMalloc ((void **) &d_c, size); // Initialize value a = 2; b = 7; // Copy data from Host to Device cudaMemcpy (d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy (d_b, &b, size, cudaMemcpyHostToDevice); // Execute add<<<1,1>>>(d_a, d_b, d_c); // Copy result back to Host // Take note that it will be smart enough to wait // until the task at device completed cudaMemcpy (&c, d_c, size, cudaMemcpyDeviceToHost); // Clean up cudaFree (d_a); cudaFree (d_b); cudaFree (d_c); printf("Task Completed: c = %d + %d = %d\n" ,a, b, c); return 0; }
#include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> // Add your kernel here __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } // main int main(void) { int a, b, c; int *d_a, *d_b, *d_c; int size = sizeof(int); // Allocate memory in Device hipMalloc ((void **) &d_a, size); hipMalloc ((void **) &d_b, size); hipMalloc ((void **) &d_c, size); // Initialize value a = 2; b = 7; // Copy data from Host to Device hipMemcpy (d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy (d_b, &b, size, hipMemcpyHostToDevice); // Execute add<<<1,1>>>(d_a, d_b, d_c); // Copy result back to Host // Take note that it will be smart enough to wait // until the task at device completed hipMemcpy (&c, d_c, size, hipMemcpyDeviceToHost); // Clean up hipFree (d_a); hipFree (d_b); hipFree (d_c); printf("Task Completed: c = %d + %d = %d\n" ,a, b, c); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> // Add your kernel here __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } // main int main(void) { int a, b, c; int *d_a, *d_b, *d_c; int size = sizeof(int); // Allocate memory in Device hipMalloc ((void **) &d_a, size); hipMalloc ((void **) &d_b, size); hipMalloc ((void **) &d_c, size); // Initialize value a = 2; b = 7; // Copy data from Host to Device hipMemcpy (d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy (d_b, &b, size, hipMemcpyHostToDevice); // Execute add<<<1,1>>>(d_a, d_b, d_c); // Copy result back to Host // Take note that it will be smart enough to wait // until the task at device completed hipMemcpy (&c, d_c, size, hipMemcpyDeviceToHost); // Clean up hipFree (d_a); hipFree (d_b); hipFree (d_c); printf("Task Completed: c = %d + %d = %d\n" ,a, b, c); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3addPiS_S_ .globl _Z3addPiS_S_ .p2align 8 .type _Z3addPiS_S_,@function _Z3addPiS_S_: s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_load_b32 s2, s[4:5], 0x0 s_load_b32 s3, s[6:7], 0x0 s_waitcnt lgkmcnt(0) s_add_i32 s2, s3, s2 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3addPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3addPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z3addPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> // Add your kernel here __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } // main int main(void) { int a, b, c; int *d_a, *d_b, *d_c; int size = sizeof(int); // Allocate memory in Device hipMalloc ((void **) &d_a, size); hipMalloc ((void **) &d_b, size); hipMalloc ((void **) &d_c, size); // Initialize value a = 2; b = 7; // Copy data from Host to Device hipMemcpy (d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy (d_b, &b, size, hipMemcpyHostToDevice); // Execute add<<<1,1>>>(d_a, d_b, d_c); // Copy result back to Host // Take note that it will be smart enough to wait // until the task at device completed hipMemcpy (&c, d_c, size, hipMemcpyDeviceToHost); // Clean up hipFree (d_a); hipFree (d_b); hipFree (d_c); printf("Task Completed: c = %d + %d = %d\n" ,a, b, c); return 0; }
.text .file "VectorAdd.hip" .globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_ .p2align 4, 0x90 .type _Z18__device_stub__addPiS_S_,@function _Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 leaq 24(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc movl $2, 4(%rsp) movl $7, (%rsp) movq 24(%rsp), %rdi leaq 4(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %rsp, %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 32(%rsp) leaq 120(%rsp), %rax movq %rax, 40(%rsp) leaq 112(%rsp), %rax movq %rax, 48(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), 
%rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi leaq 32(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movl 4(%rsp), %esi movl (%rsp), %edx movl 32(%rsp), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf xorl %eax, %eax addq $136, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3addPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type 
_Z3addPiS_S_,@object # @_Z3addPiS_S_ .section .rodata,"a",@progbits .globl _Z3addPiS_S_ .p2align 3, 0x0 _Z3addPiS_S_: .quad _Z18__device_stub__addPiS_S_ .size _Z3addPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Task Completed: c = %d + %d = %d\n" .size .L.str, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3addPiS_S_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__addPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3addPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z3addPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */ /* 0x000fe200078e00ff */ /*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */ /* 0x000fe20000000f00 */ /*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */ /* 0x000fe200078e00ff */ /*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */ /* 0x000fe40000000f00 */ /*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3addPiS_S_ .globl _Z3addPiS_S_ .p2align 8 .type _Z3addPiS_S_,@function _Z3addPiS_S_: s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_load_b32 s2, s[4:5], 0x0 s_load_b32 s3, s[6:7], 0x0 s_waitcnt lgkmcnt(0) s_add_i32 s2, s3, s2 s_delay_alu instid0(SALU_CYCLE_1) v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3addPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3addPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z3addPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00126b24_00000000-6_VectorAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z3addPiS_S_PiS_S_ .type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function _Z26__device_stub__Z3addPiS_S_PiS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z3addPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_ .globl _Z3addPiS_S_ .type _Z3addPiS_S_, @function _Z3addPiS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z3addPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z3addPiS_S_, .-_Z3addPiS_S_ .section 
.rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Task Completed: c = %d + %d = %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl $2, (%rsp) movl $7, 4(%rsp) movq %rsp, %rsi movl $1, %ecx movl $4, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT leaq 4(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: leaq 44(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 24(%rsp), %rsi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movl 44(%rsp), %r8d movl 4(%rsp), %ecx movl (%rsp), %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z3addPiS_S_PiS_S_ jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z3addPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z3addPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "VectorAdd.hip" .globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_ .p2align 4, 0x90 .type _Z18__device_stub__addPiS_S_,@function _Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 leaq 24(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc movl $2, 4(%rsp) movl $7, (%rsp) movq 24(%rsp), %rdi leaq 4(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %rsp, %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 128(%rsp) movq %rcx, 120(%rsp) movq %rdx, 112(%rsp) leaq 128(%rsp), %rax movq %rax, 32(%rsp) leaq 120(%rsp), %rax movq %rax, 40(%rsp) leaq 112(%rsp), %rax movq %rax, 48(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), 
%rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi leaq 32(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movl 4(%rsp), %esi movl (%rsp), %edx movl 32(%rsp), %ecx movl $.L.str, %edi xorl %eax, %eax callq printf xorl %eax, %eax addq $136, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3addPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type 
_Z3addPiS_S_,@object # @_Z3addPiS_S_ .section .rodata,"a",@progbits .globl _Z3addPiS_S_ .p2align 3, 0x0 _Z3addPiS_S_: .quad _Z18__device_stub__addPiS_S_ .size _Z3addPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Task Completed: c = %d + %d = %d\n" .size .L.str, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3addPiS_S_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__addPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3addPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>
using namespace std;

// Abort with a diagnostic when a CUDA runtime call fails.  Kernel launches
// are checked separately via cudaGetLastError() after the launch.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(1);                                                          \
        }                                                                     \
    } while (0)

// Read vertex count n and edge count m from data/nums.txt.
void readInt(int &n, int &m) {
    ifstream fin_n("data/nums.txt");
    fin_n >> n >> m;
}

// Read m edges ("u v", 1-based) from data/graph.txt and pack each edge as
// ((u-1) << 32) + (v-1) into neib[i].  The n parameter is unused but kept
// so the existing call sites keep compiling.  (The original allocated an
// unused vector<vector<int>> of size n here; that dead work is removed.)
void readGraph(unsigned long long *neib, int n, int m) {
    (void)n;
    ifstream fin_g("data/graph.txt");
    for (int i = 0; i < m; ++i) {
        int u, v;
        fin_g >> u >> v;
        u--, v--;
        neib[i] = ((unsigned long long)u << 32) + v;
    }
}

// Odd-round hooking: for each edge whose endpoints have different parents,
// the larger parent adopts the smaller parent; otherwise the edge is marked
// as internal.  Grid-stride loop so ALL e_num edges are covered even when
// e_num exceeds the fixed 256x256 launch (the original single-guard version
// silently skipped edges beyond 65536).
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        int u, v;
        u = temp & 0xffffffff;
        v = temp >> 32;
        if (parent[u] != parent[v]) {
            parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Even-round hooking: same as the odd round but the smaller parent adopts
// the larger one, alternating the hooking direction between rounds.
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        unsigned int u, v;
        u = temp & 0xffffffff;
        v = (temp >> 32) & 0xffffffff;
        if (parent[u] != parent[v]) {
            parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Pointer jumping: parent[tid] <- parent[parent[tid]]; sets *flag when any
// entry changed.  Grid-stride loop covers all v_num vertices.
__global__ void jump(int *parent, int v_num, int *flag) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < v_num; tid += stride) {
        int p = parent[tid];
        int p_p = parent[p];
        if (p != p_p) {
            parent[tid] = p_p;
            (*flag) = 1;
        }
    }
}

// Connected components via alternating hook + pointer-jump rounds.
// Fixes over the original:
//  - cudaFree was called on &ptr (address of the host pointer variable)
//    instead of ptr, so device memory was never released;
//  - "while(flag)" tested an array's address (always true) — the loop is
//    now an explicit while(true) exited by the break;
//  - device buffers are allocated once instead of re-malloc'd (and leaked)
//    every iteration;
//  - VLAs int h_parent[n]/h_mark[m] (non-standard, stack-overflow prone)
//    replaced by std::vector;
//  - deprecated cudaThreadSynchronize replaced by cudaDeviceSynchronize;
//  - all runtime calls are error-checked.
int main() {
    int n, m;
    readInt(n, m);

    vector<unsigned long long> h_edge_list(m);
    readGraph(h_edge_list.data(), n, m);

    vector<int> h_parent(n);
    vector<int> h_mark(m, 0);
    for (int i = 0; i < n; ++i) {
        h_parent[i] = i;
    }

    int flag[1];
    int *d_parent, *d_mark, *d_flag;
    unsigned long long *d_edge_list;

    // Allocate once; the edge list never changes so copy it once too.
    CUDA_CHECK(cudaMalloc(&d_parent, n * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&d_edge_list, m * sizeof(unsigned long long)));
    CUDA_CHECK(cudaMalloc(&d_mark, m * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&d_flag, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(d_edge_list, h_edge_list.data(),
                          m * sizeof(unsigned long long), cudaMemcpyHostToDevice));

    int count = 0;
    clock_t beg = clock();
    while (true) {  // exits via break when a hooking round changes nothing
        flag[0] = 0;
        CUDA_CHECK(cudaMemcpy(d_parent, h_parent.data(), n * sizeof(int), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_mark, h_mark.data(), m * sizeof(int), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice));
        if (count) {
            select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        } else {
            select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        }
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaDeviceSynchronize());
        CUDA_CHECK(cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(h_parent.data(), d_parent, n * sizeof(int), cudaMemcpyDeviceToHost));
        if (!flag[0]) {
            break;
        }
        count ^= 1;
        // Pointer-jump until every parent chain is fully compressed.
        do {
            flag[0] = 0;
            CUDA_CHECK(cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(d_parent, h_parent.data(), n * sizeof(int), cudaMemcpyHostToDevice));
            jump<<<256, 256>>>(d_parent, n, d_flag);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
            CUDA_CHECK(cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost));
            CUDA_CHECK(cudaMemcpy(h_parent.data(), d_parent, n * sizeof(int), cudaMemcpyDeviceToHost));
        } while (flag[0]);
    }

    CUDA_CHECK(cudaFree(d_parent));
    CUDA_CHECK(cudaFree(d_edge_list));
    CUDA_CHECK(cudaFree(d_mark));
    CUDA_CHECK(cudaFree(d_flag));

    cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
    sort(h_parent.begin(), h_parent.end());
    // Number of distinct roots == number of connected components.
    cout << unique(h_parent.begin(), h_parent.end()) - h_parent.begin();
    return 0;
}
code for sm_80 Function : _Z4jumpPiiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0205 */ /*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea4000c1e1900 */ /*00a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x004fca00078e0205 */ /*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea4000c1e1900 */ /*00c0*/ ISETP.NE.AND P0, PT, R0, R7, PT ; /* 0x000000070000720c */ /* 0x004fda0003f05270 */ /*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe200078e00ff */ /*00f0*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */ /* 0x000fe20000000f00 */ /*0100*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */ /* 0x000fe200078e00ff */ /*0110*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0120*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe2000c101904 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z18select_winner_evenPiPyS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0203 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea4000c1e1b00 */ /*00a0*/ IMAD.SHL.U32 R6, R2.reuse, 0x4, RZ ; /* 0x0000000402067824 */ /* 0x044fe200078e00ff */ /*00b0*/ SHF.R.U64 R4, R2.reuse, 0x1e, R3.reuse ; /* 0x0000001e02047819 */ /* 0x140fe40000001203 */ /*00c0*/ SHF.L.U64.HI R7, R2, 0x2, R3.reuse ; /* 0x0000000202077819 */ /* 0x100fe40000010203 */ /*00d0*/ LOP3.LUT R6, R6, 
0xfffffffc, RZ, 0xc0, !PT ; /* 0xfffffffc06067812 */ /* 0x000fe400078ec0ff */ /*00e0*/ SHF.R.U32.HI R5, RZ, 0x1e, R3 ; /* 0x0000001eff057819 */ /* 0x000fe40000011603 */ /*00f0*/ LOP3.LUT R4, R4, 0xfffffffc, RZ, 0xc0, !PT ; /* 0xfffffffc04047812 */ /* 0x000fc400078ec0ff */ /*0100*/ LOP3.LUT R7, R7, 0x3, RZ, 0xc0, !PT ; /* 0x0000000307077812 */ /* 0x000fe400078ec0ff */ /*0110*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe40007f1e0ff */ /*0120*/ LOP3.LUT R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe400078ec0ff */ /*0130*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fe40007f3e0ff */ /*0140*/ IADD3.X R7, R7, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590007077a10 */ /* 0x000fe400007fe4ff */ /*0150*/ IADD3.X R5, R5, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590005057a10 */ /* 0x000fc80000ffe4ff */ /*0160*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea8000c1e1900 */ /*0170*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0180*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fe40000011400 */ /*0190*/ ISETP.NE.AND P0, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f05270 */ /*01a0*/ @!P0 BRA 0x270 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*01b0*/ IMNMX R2, R4, R7, !PT ; /* 0x0000000704027217 */ /* 0x000fe20007800200 */ /*01c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*01d0*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*01e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*01f0*/ IMNMX R4, R4, R7, PT ; /* 0x0000000704047217 */ /* 0x000fe20003800200 */ /*0200*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe400078e00ff */ /*0210*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe400078e00ff */ /*0220*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 
0x000fc800078e0205 */ /*0230*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */ /* 0x000fe200078e00ff */ /*0240*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe8000c101904 */ /*0250*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0260*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0270*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fe200078010ff */ /*0280*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */ /* 0x000fc600078e00ff */ /*0290*/ LEA.HI.X R3, R0, c[0x0][0x174], R3, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x000fca00000f1403 */ /*02a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*02b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02c0*/ BRA 0x2c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z17select_winner_oddPiPyS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0203 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea4000c1e1b00 */ /*00a0*/ SHF.R.S64 R6, RZ, 0x1e, R2 ; /* 0x0000001eff067819 */ /* 0x004fe40000001002 */ /*00b0*/ SHF.R.S32.HI R5, RZ, 0x1f, R3 ; /* 0x0000001fff057819 */ /* 0x000fe40000011403 */ /*00c0*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe40007f1e0ff */ /*00d0*/ LEA R4, P1, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003047a11 */ /* 0x000fe400078210ff */ /*00e0*/ LEA.HI.X.SX32 R7, R2, c[0x0][0x164], 0x2, P0 ; /* 0x0000590002077a11 */ /* 0x000fc400000f16ff */ /*00f0*/ LEA.HI.X R5, R3, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590003057a11 */ /* 0x000fc800008f1405 */ /*0100*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea8000c1e1900 */ /*0110*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0120*/ SHF.R.S32.HI R9, RZ, 0x1f, R0 ; /* 0x0000001fff097819 */ /* 0x000fe40000011400 */ /*0130*/ ISETP.NE.AND P0, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f05270 */ /*0140*/ @!P0 BRA 0x210 ; /* 
0x000000c000008947 */ /* 0x000fea0003800000 */ /*0150*/ IMNMX R2, R4, R7, PT ; /* 0x0000000704027217 */ /* 0x000fe20003800200 */ /*0160*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*0170*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*0180*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0190*/ IMNMX R4, R4, R7, !PT ; /* 0x0000000704047217 */ /* 0x000fe20007800200 */ /*01a0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe400078e00ff */ /*01b0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe400078e00ff */ /*01c0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fc800078e0205 */ /*01d0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */ /* 0x000fe200078e00ff */ /*01e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe8000c101904 */ /*01f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0200*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0210*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fe200078010ff */ /*0220*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */ /* 0x000fc600078e00ff */ /*0230*/ LEA.HI.X R3, R0, c[0x0][0x174], R9, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x000fca00000f1409 */ /*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>
using namespace std;

// Abort with a diagnostic when a CUDA runtime call fails.  Kernel launches
// are checked separately via cudaGetLastError() after the launch.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(1);                                                          \
        }                                                                     \
    } while (0)

// Read vertex count n and edge count m from data/nums.txt.
void readInt(int &n, int &m) {
    ifstream fin_n("data/nums.txt");
    fin_n >> n >> m;
}

// Read m edges ("u v", 1-based) from data/graph.txt and pack each edge as
// ((u-1) << 32) + (v-1) into neib[i].  The n parameter is unused but kept
// so the existing call sites keep compiling.  (The original allocated an
// unused vector<vector<int>> of size n here; that dead work is removed.)
void readGraph(unsigned long long *neib, int n, int m) {
    (void)n;
    ifstream fin_g("data/graph.txt");
    for (int i = 0; i < m; ++i) {
        int u, v;
        fin_g >> u >> v;
        u--, v--;
        neib[i] = ((unsigned long long)u << 32) + v;
    }
}

// Odd-round hooking: for each edge whose endpoints have different parents,
// the larger parent adopts the smaller parent; otherwise the edge is marked
// as internal.  Grid-stride loop so ALL e_num edges are covered even when
// e_num exceeds the fixed 256x256 launch (the original single-guard version
// silently skipped edges beyond 65536).
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        int u, v;
        u = temp & 0xffffffff;
        v = temp >> 32;
        if (parent[u] != parent[v]) {
            parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Even-round hooking: same as the odd round but the smaller parent adopts
// the larger one, alternating the hooking direction between rounds.
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        unsigned int u, v;
        u = temp & 0xffffffff;
        v = (temp >> 32) & 0xffffffff;
        if (parent[u] != parent[v]) {
            parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Pointer jumping: parent[tid] <- parent[parent[tid]]; sets *flag when any
// entry changed.  Grid-stride loop covers all v_num vertices.
__global__ void jump(int *parent, int v_num, int *flag) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < v_num; tid += stride) {
        int p = parent[tid];
        int p_p = parent[p];
        if (p != p_p) {
            parent[tid] = p_p;
            (*flag) = 1;
        }
    }
}

// Connected components via alternating hook + pointer-jump rounds.
// Fixes over the original:
//  - cudaFree was called on &ptr (address of the host pointer variable)
//    instead of ptr, so device memory was never released;
//  - "while(flag)" tested an array's address (always true) — the loop is
//    now an explicit while(true) exited by the break;
//  - device buffers are allocated once instead of re-malloc'd (and leaked)
//    every iteration;
//  - VLAs int h_parent[n]/h_mark[m] (non-standard, stack-overflow prone)
//    replaced by std::vector;
//  - deprecated cudaThreadSynchronize replaced by cudaDeviceSynchronize;
//  - all runtime calls are error-checked.
int main() {
    int n, m;
    readInt(n, m);

    vector<unsigned long long> h_edge_list(m);
    readGraph(h_edge_list.data(), n, m);

    vector<int> h_parent(n);
    vector<int> h_mark(m, 0);
    for (int i = 0; i < n; ++i) {
        h_parent[i] = i;
    }

    int flag[1];
    int *d_parent, *d_mark, *d_flag;
    unsigned long long *d_edge_list;

    // Allocate once; the edge list never changes so copy it once too.
    CUDA_CHECK(cudaMalloc(&d_parent, n * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&d_edge_list, m * sizeof(unsigned long long)));
    CUDA_CHECK(cudaMalloc(&d_mark, m * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&d_flag, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(d_edge_list, h_edge_list.data(),
                          m * sizeof(unsigned long long), cudaMemcpyHostToDevice));

    int count = 0;
    clock_t beg = clock();
    while (true) {  // exits via break when a hooking round changes nothing
        flag[0] = 0;
        CUDA_CHECK(cudaMemcpy(d_parent, h_parent.data(), n * sizeof(int), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_mark, h_mark.data(), m * sizeof(int), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice));
        if (count) {
            select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        } else {
            select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        }
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaDeviceSynchronize());
        CUDA_CHECK(cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(h_parent.data(), d_parent, n * sizeof(int), cudaMemcpyDeviceToHost));
        if (!flag[0]) {
            break;
        }
        count ^= 1;
        // Pointer-jump until every parent chain is fully compressed.
        do {
            flag[0] = 0;
            CUDA_CHECK(cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(d_parent, h_parent.data(), n * sizeof(int), cudaMemcpyHostToDevice));
            jump<<<256, 256>>>(d_parent, n, d_flag);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
            CUDA_CHECK(cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost));
            CUDA_CHECK(cudaMemcpy(h_parent.data(), d_parent, n * sizeof(int), cudaMemcpyDeviceToHost));
        } while (flag[0]);
    }

    CUDA_CHECK(cudaFree(d_parent));
    CUDA_CHECK(cudaFree(d_edge_list));
    CUDA_CHECK(cudaFree(d_mark));
    CUDA_CHECK(cudaFree(d_flag));

    cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
    sort(h_parent.begin(), h_parent.end());
    // Number of distinct roots == number of connected components.
    cout << unique(h_parent.begin(), h_parent.end()) - h_parent.begin();
    return 0;
}
.file "tmpxft_00063b8f_00000000-6_fast.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4432: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4432: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i .type _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i, @function _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i: .LFB4454: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z17select_winner_oddPiPyS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE4454: .size _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i, .-_Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i .globl 
_Z17select_winner_oddPiPyS_S_i .type _Z17select_winner_oddPiPyS_S_i, @function _Z17select_winner_oddPiPyS_S_i: .LFB4455: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4455: .size _Z17select_winner_oddPiPyS_S_i, .-_Z17select_winner_oddPiPyS_S_i .globl _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i .type _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i, @function _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i: .LFB4456: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 152(%rsp), %rax subq %fs:40, %rax jne .L16 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z18select_winner_evenPiPyS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE4456: .size _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i, .-_Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i .globl _Z18select_winner_evenPiPyS_S_i .type _Z18select_winner_evenPiPyS_S_i, @function 
_Z18select_winner_evenPiPyS_S_i: .LFB4457: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4457: .size _Z18select_winner_evenPiPyS_S_i, .-_Z18select_winner_evenPiPyS_S_i .globl _Z26__device_stub__Z4jumpPiiS_PiiS_ .type _Z26__device_stub__Z4jumpPiiS_PiiS_, @function _Z26__device_stub__Z4jumpPiiS_PiiS_: .LFB4458: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4jumpPiiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE4458: .size _Z26__device_stub__Z4jumpPiiS_PiiS_, .-_Z26__device_stub__Z4jumpPiiS_PiiS_ .globl _Z4jumpPiiS_ .type _Z4jumpPiiS_, @function _Z4jumpPiiS_: .LFB4459: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z4jumpPiiS_PiiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4459: .size _Z4jumpPiiS_, .-_Z4jumpPiiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z4jumpPiiS_" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "_Z18select_winner_evenPiPyS_S_i" 
.align 8 .LC2: .string "_Z17select_winner_oddPiPyS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4461: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z4jumpPiiS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z18select_winner_evenPiPyS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z17select_winner_oddPiPyS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4461: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .rodata.str1.1 .LC3: .string "data/graph.txt" .LC4: .string "vector::_M_default_append" .text .globl _Z9readGraphPyii .type _Z9readGraphPyii, @function _Z9readGraphPyii: .LFB4418: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4418 endbr64 pushq %r15 .cfi_def_cfa_offset 16 
.cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $568, %rsp .cfi_def_cfa_offset 624 movq %rdi, %r12 movl %esi, %ebx movl %edx, %ebp movq %fs:40, %rax movq %rax, 552(%rsp) xorl %eax, %eax leaq 32(%rsp), %r13 leaq 288(%rsp), %rdi call _ZNSt8ios_baseC2Ev@PLT leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 288(%rsp) movq $0, 504(%rsp) movb $0, 512(%rsp) movb $0, 513(%rsp) movq $0, 520(%rsp) movq $0, 528(%rsp) movq $0, 536(%rsp) movq $0, 544(%rsp) movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, 32(%rsp) movq -24(%rax), %rax movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rcx movq %rcx, 32(%rsp,%rax) movq $0, 40(%rsp) movq 32(%rsp), %rax movq %r13, %rdi addq -24(%rax), %rdi movl $0, %esi .LEHB0: call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT .LEHE0: leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, 32(%rsp) leaq 40(%rax), %rax movq %rax, 288(%rsp) leaq 48(%rsp), %rdi .LEHB1: call _ZNSt13basic_filebufIcSt11char_traitsIcEEC1Ev@PLT .LEHE1: leaq 48(%rsp), %rsi leaq 288(%rsp), %rdi .LEHB2: call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT leaq 48(%rsp), %rdi movl $8, %edx leaq .LC3(%rip), %rsi call _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT testq %rax, %rax je .L74 movq 32(%rsp), %rax movq -24(%rax), %rax leaq 32(%rsp,%rax), %rdi movl $0, %esi call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT jmp .L31 .L74: movq 32(%rsp), %rax movq -24(%rax), %rax leaq 32(%rsp,%rax), %rdi movl 32(%rdi), %esi orl $4, %esi call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT .LEHE2: .L31: movslq %ebx, %rbx testq %rbx, %rbx jne .L75 movq $0, 8(%rsp) 
movl $0, %r13d movl $0, %r15d testl %ebp, %ebp jle .L48 .L57: movl $0, %ebx leaq 24(%rsp), %r14 jmp .L44 .L64: endbr64 movq %rax, %rbx leaq 48(%rsp), %rdi call _ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev@PLT .L35: movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, 32(%rsp) movq -24(%rax), %rax movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rcx movq %rcx, 32(%rsp,%rax) movq $0, 40(%rsp) .L36: leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 288(%rsp) leaq 288(%rsp), %rdi call _ZNSt8ios_baseD2Ev@PLT movq 552(%rsp), %rax subq %fs:40, %rax je .L37 call __stack_chk_fail@PLT .L63: endbr64 movq %rax, %rbx jmp .L35 .L62: endbr64 movq %rax, %rbx jmp .L36 .L37: movq %rbx, %rdi .LEHB3: call _Unwind_Resume@PLT .LEHE3: .L75: movabsq $384307168202282325, %rax cmpq %rbx, %rax jb .L76 movabsq $384307168202282325, %rax cmpq %rax, %rbx cmovbe %rbx, %rax leaq (%rax,%rax,2), %rax leaq 0(,%rax,8), %r14 movq %r14, %rdi .LEHB4: call _Znwm@PLT .LEHE4: jmp .L77 .L76: movq 552(%rsp), %rax subq %fs:40, %rax jne .L78 leaq .LC4(%rip), %rdi .LEHB5: call _ZSt20__throw_length_errorPKc@PLT .LEHE5: .L67: endbr64 movq %rax, %rbx movq $0, 8(%rsp) movl $0, %r13d movl $0, %r15d .L41: movq %r15, %rbp .L52: cmpq %rbp, %r13 jne .L54 movq 8(%rsp), %rsi subq %r15, %rsi testq %r15, %r15 je .L55 movq %r15, %rdi call _ZdlPvm@PLT .L55: leaq 32(%rsp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT movq 552(%rsp), %rax subq %fs:40, %rax je .L56 call __stack_chk_fail@PLT .L78: call __stack_chk_fail@PLT .L77: movq %rax, %r15 movq %rbx, %rdx .L42: movq $0, (%rax) movq $0, 8(%rax) movq $0, 16(%rax) addq $24, %rax subq $1, %rdx jne .L42 leaq (%rbx,%rbx,2), %rax leaq (%r15,%rax,8), %r13 leaq (%r15,%r14), %rax movq %rax, 8(%rsp) testl %ebp, %ebp jg .L57 jmp .L43 .L80: movq %rax, %rdi leaq 28(%rsp), %rsi .LEHB6: call _ZNSirsERi@PLT movl 24(%rsp), %eax subl $1, %eax salq $32, %rax movl 28(%rsp), %ecx leal -1(%rcx), %edx movslq %edx, %rdx addq %rdx, 
%rax movq %rax, (%r12,%rbx,8) addq $1, %rbx cmpl %ebx, %ebp jle .L79 .L44: leaq 32(%rsp), %rdi movq %r14, %rsi call _ZNSirsERi@PLT .LEHE6: jmp .L80 .L79: cmpq %r15, %r13 je .L59 .L43: movq %r15, %rbx jmp .L47 .L46: addq $24, %rbx cmpq %rbx, %r13 je .L45 .L47: movq (%rbx), %rdi testq %rdi, %rdi je .L46 movq 16(%rbx), %rsi subq %rdi, %rsi call _ZdlPvm@PLT jmp .L46 .L59: movq %r13, %r15 .L45: testq %r15, %r15 je .L48 movq 8(%rsp), %rsi subq %r15, %rsi movq %r15, %rdi call _ZdlPvm@PLT .L48: leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, 32(%rsp) leaq 40(%rax), %rax movq %rax, 288(%rsp) leaq 16+_ZTVSt13basic_filebufIcSt11char_traitsIcEE(%rip), %rax movq %rax, 48(%rsp) leaq 48(%rsp), %rdi .LEHB7: call _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv@PLT .LEHE7: jmp .L50 .L65: endbr64 movq %rax, %rdi call __cxa_begin_catch@PLT call __cxa_end_catch@PLT .L50: leaq 152(%rsp), %rdi call _ZNSt12__basic_fileIcED1Ev@PLT leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax movq %rax, 48(%rsp) leaq 104(%rsp), %rdi call _ZNSt6localeD1Ev@PLT movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, 32(%rsp) movq -24(%rax), %rax movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rcx movq %rcx, 32(%rsp,%rax) movq $0, 40(%rsp) leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 288(%rsp) leaq 288(%rsp), %rdi call _ZNSt8ios_baseD2Ev@PLT movq 552(%rsp), %rax subq %fs:40, %rax jne .L81 addq $568, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L66: .cfi_restore_state endbr64 movq %rax, %rbx movq $0, 8(%rsp) movl $0, %r13d movl $0, %r15d jmp .L41 .L61: endbr64 movq %rax, %rbx jmp .L41 .L54: movq 0(%rbp), %rdi movq 16(%rbp), %rsi subq %rdi, %rsi testq %rdi, %rdi je .L53 call _ZdlPvm@PLT .L53: addq $24, %rbp jmp 
.L52 .L56: movq %rbx, %rdi .LEHB8: call _Unwind_Resume@PLT .LEHE8: .L81: call __stack_chk_fail@PLT .cfi_endproc .LFE4418: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .align 4 .LLSDA4418: .byte 0xff .byte 0x9b .uleb128 .LLSDATT4418-.LLSDATTD4418 .LLSDATTD4418: .byte 0x1 .uleb128 .LLSDACSE4418-.LLSDACSB4418 .LLSDACSB4418: .uleb128 .LEHB0-.LFB4418 .uleb128 .LEHE0-.LEHB0 .uleb128 .L62-.LFB4418 .uleb128 0 .uleb128 .LEHB1-.LFB4418 .uleb128 .LEHE1-.LEHB1 .uleb128 .L63-.LFB4418 .uleb128 0 .uleb128 .LEHB2-.LFB4418 .uleb128 .LEHE2-.LEHB2 .uleb128 .L64-.LFB4418 .uleb128 0 .uleb128 .LEHB3-.LFB4418 .uleb128 .LEHE3-.LEHB3 .uleb128 0 .uleb128 0 .uleb128 .LEHB4-.LFB4418 .uleb128 .LEHE4-.LEHB4 .uleb128 .L66-.LFB4418 .uleb128 0 .uleb128 .LEHB5-.LFB4418 .uleb128 .LEHE5-.LEHB5 .uleb128 .L67-.LFB4418 .uleb128 0 .uleb128 .LEHB6-.LFB4418 .uleb128 .LEHE6-.LEHB6 .uleb128 .L61-.LFB4418 .uleb128 0 .uleb128 .LEHB7-.LFB4418 .uleb128 .LEHE7-.LEHB7 .uleb128 .L65-.LFB4418 .uleb128 0x1 .uleb128 .LEHB8-.LFB4418 .uleb128 .LEHE8-.LEHB8 .uleb128 0 .uleb128 0 .LLSDACSE4418: .byte 0x1 .byte 0 .align 4 .long 0 .LLSDATT4418: .text .size _Z9readGraphPyii, .-_Z9readGraphPyii .section .rodata.str1.1 .LC5: .string "data/nums.txt" .text .globl _Z7readIntRiS_ .type _Z7readIntRiS_, @function _Z7readIntRiS_: .LFB4417: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4417 endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $528, %rsp .cfi_def_cfa_offset 576 movq %rdi, %r12 movq %rsi, %rbp movq %fs:40, %rax movq %rax, 520(%rsp) xorl %eax, %eax movq %rsp, %r14 leaq 256(%rsp), %rdi call _ZNSt8ios_baseC2Ev@PLT leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 256(%rsp) movq $0, 472(%rsp) movb $0, 480(%rsp) movb 
$0, 481(%rsp) movq $0, 488(%rsp) movq $0, 496(%rsp) movq $0, 504(%rsp) movq $0, 512(%rsp) movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rbx movq %rbx, (%rsp) movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %r13 movq -24(%rbx), %rax movq %r13, (%rsp,%rax) movq $0, 8(%rsp) movq (%rsp), %rax movq %r14, %rdi addq -24(%rax), %rdi movl $0, %esi .LEHB9: call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT .LEHE9: leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, (%rsp) leaq 40(%rax), %rax movq %rax, 256(%rsp) leaq 16(%rsp), %rdi .LEHB10: call _ZNSt13basic_filebufIcSt11char_traitsIcEEC1Ev@PLT .LEHE10: leaq 16(%rsp), %rsi leaq 256(%rsp), %rdi .LEHB11: call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT leaq 16(%rsp), %rdi movl $8, %edx leaq .LC5(%rip), %rsi call _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT testq %rax, %rax je .L103 movq (%rsp), %rax movq -24(%rax), %rax leaq (%rsp,%rax), %rdi movl $0, %esi call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT jmp .L84 .L103: movq (%rsp), %rax movq -24(%rax), %rax leaq (%rsp,%rax), %rdi movl 32(%rdi), %esi orl $4, %esi call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT .LEHE11: .L84: movq %rsp, %rdi movq %r12, %rsi .LEHB12: call _ZNSirsERi@PLT movq %rax, %rdi movq %rbp, %rsi call _ZNSirsERi@PLT .LEHE12: jmp .L104 .L98: endbr64 movq %rax, %rbp leaq 16(%rsp), %rdi call _ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev@PLT movq %rbp, %rax .L87: movq %rbx, (%rsp) movq -24(%rbx), %rdx movq %r13, (%rsp,%rdx) movq $0, 8(%rsp) movq %rax, %rbx .L88: leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 256(%rsp) leaq 256(%rsp), %rdi call _ZNSt8ios_baseD2Ev@PLT movq 520(%rsp), %rax subq %fs:40, %rax je .L89 call __stack_chk_fail@PLT .L97: endbr64 jmp .L87 .L96: endbr64 movq %rax, %rbx jmp .L88 .L89: movq %rbx, %rdi .LEHB13: call _Unwind_Resume@PLT .LEHE13: .L104: 
leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax movq %rax, (%rsp) leaq 40(%rax), %rax movq %rax, 256(%rsp) leaq 16+_ZTVSt13basic_filebufIcSt11char_traitsIcEE(%rip), %rax movq %rax, 16(%rsp) leaq 16(%rsp), %rdi .LEHB14: call _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv@PLT .LEHE14: jmp .L91 .L99: endbr64 movq %rax, %rdi call __cxa_begin_catch@PLT call __cxa_end_catch@PLT .L91: leaq 120(%rsp), %rdi call _ZNSt12__basic_fileIcED1Ev@PLT leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax movq %rax, 16(%rsp) leaq 72(%rsp), %rdi call _ZNSt6localeD1Ev@PLT movq %rbx, (%rsp) movq -24(%rbx), %rax movq %r13, (%rsp,%rax) movq $0, 8(%rsp) leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax movq %rax, 256(%rsp) leaq 256(%rsp), %rdi call _ZNSt8ios_baseD2Ev@PLT movq 520(%rsp), %rax subq %fs:40, %rax jne .L105 addq $528, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L95: .cfi_restore_state endbr64 movq %rax, %rbx movq %rsp, %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT movq 520(%rsp), %rax subq %fs:40, %rax je .L93 call __stack_chk_fail@PLT .L93: movq %rbx, %rdi .LEHB15: call _Unwind_Resume@PLT .LEHE15: .L105: call __stack_chk_fail@PLT .cfi_endproc .LFE4417: .section .gcc_except_table .align 4 .LLSDA4417: .byte 0xff .byte 0x9b .uleb128 .LLSDATT4417-.LLSDATTD4417 .LLSDATTD4417: .byte 0x1 .uleb128 .LLSDACSE4417-.LLSDACSB4417 .LLSDACSB4417: .uleb128 .LEHB9-.LFB4417 .uleb128 .LEHE9-.LEHB9 .uleb128 .L96-.LFB4417 .uleb128 0 .uleb128 .LEHB10-.LFB4417 .uleb128 .LEHE10-.LEHB10 .uleb128 .L97-.LFB4417 .uleb128 0 .uleb128 .LEHB11-.LFB4417 .uleb128 .LEHE11-.LEHB11 .uleb128 .L98-.LFB4417 .uleb128 0 .uleb128 .LEHB12-.LFB4417 .uleb128 .LEHE12-.LEHB12 .uleb128 .L95-.LFB4417 .uleb128 0 .uleb128 .LEHB13-.LFB4417 .uleb128 .LEHE13-.LEHB13 .uleb128 0 .uleb128 0 .uleb128 
.LEHB14-.LFB4417 .uleb128 .LEHE14-.LEHB14 .uleb128 .L99-.LFB4417 .uleb128 0x1 .uleb128 .LEHB15-.LFB4417 .uleb128 .LEHE15-.LEHB15 .uleb128 0 .uleb128 0 .LLSDACSE4417: .byte 0x1 .byte 0 .align 4 .long 0 .LLSDATT4417: .text .size _Z7readIntRiS_, .-_Z7readIntRiS_ .section .text._ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_,"axG",@progbits,_ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_,comdat .weak _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_ .type _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_, @function _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_: .LFB5206: .cfi_startproc endbr64 movl (%rdi), %ecx leaq -4(%rdi), %rax movl -4(%rdi), %edx cmpl %edx, %ecx jge .L107 .L108: movl %edx, 4(%rax) movq %rax, %rdi subq $4, %rax movl (%rax), %edx cmpl %edx, %ecx jl .L108 .L107: movl %ecx, (%rdi) ret .cfi_endproc .LFE5206: .size _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_, .-_ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_ .section .text._ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_,"axG",@progbits,_ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_,comdat .weak _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_ .type _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_, @function _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_: .LFB5170: .cfi_startproc endbr64 cmpq %rsi, %rdi je .L119 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %rbp movq %rsi, %r13 leaq 4(%rdi), %rbx cmpq %rbx, %rsi je .L110 movl $4, %r14d jmp .L116 .L113: je .L122 .L114: 
movl %r12d, 0(%rbp) .L115: addq $4, %rbx cmpq %rbx, %r13 je .L110 .L116: movl (%rbx), %r12d movl 0(%rbp), %eax cmpl %r12d, %eax jle .L112 movq %rbx, %rdx subq %rbp, %rdx cmpq $4, %rdx jle .L113 movq %r14, %rdi subq %rdx, %rdi addq %rbx, %rdi movq %rbp, %rsi call memmove@PLT jmp .L114 .L122: movl %eax, (%rbx) jmp .L114 .L112: movq %rbx, %rdi call _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_ jmp .L115 .L110: popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L119: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 ret .cfi_endproc .LFE5170: .size _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_, .-_ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_ .section .text._ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_,"axG",@progbits,_ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_,comdat .weak _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_ .type _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_, @function _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_: .LFB5259: .cfi_startproc endbr64 movq %rsi, %r8 movl %ecx, %r9d leaq -1(%rdx), %rax movq %rax, %r10 shrq $63, %r10 addq %rax, %r10 sarq %r10 cmpq %r10, %rsi jl .L126 movq %rsi, %rax testb $1, %dl jne .L128 movq %r8, %rax jmp .L130 .L125: movl (%rdi,%rax,4), %ecx movl %ecx, (%rdi,%rsi,4) cmpq %r10, %rax jge .L135 movq %rax, %rsi .L126: leaq 1(%rsi), %rcx leaq (%rcx,%rcx), %rax movl -4(%rdi,%rcx,8), %r11d cmpl %r11d, (%rdi,%rcx,8) jge .L125 subq $1, %rax jmp .L125 .L135: testb $1, %dl jne .L127 .L130: subq $2, %rdx movq %rdx, %rcx shrq $63, %rcx addq %rcx, %rdx sarq %rdx cmpq %rax, %rdx je .L136 .L127: leaq -1(%rax), %rdx movq %rdx, %rcx shrq $63, %rcx addq %rdx, %rcx sarq %rcx 
cmpq %r8, %rax jg .L129 jmp .L128 .L136: leaq 2(%rax,%rax), %rdx movl -4(%rdi,%rdx,4), %ecx movl %ecx, (%rdi,%rax,4) leaq -1(%rdx), %rax jmp .L127 .L132: movq %rdx, %rcx .L129: movl (%rdi,%rcx,4), %edx cmpl %edx, %r9d jle .L128 movl %edx, (%rdi,%rax,4) leaq -1(%rcx), %rdx movq %rdx, %rax shrq $63, %rax addq %rdx, %rax sarq %rax movq %rax, %rdx movq %rcx, %rax cmpq %rcx, %r8 jl .L132 .L128: movl %r9d, (%rdi,%rax,4) ret .cfi_endproc .LFE5259: .size _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_, .-_ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_ .section .text._ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_,"axG",@progbits,_ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_,comdat .weak _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_ .type _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_, @function _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_: .LFB5094: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $8, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbp movq %rsi, %rax subq %rdi, %rax cmpq $64, %rax jle .L137 movq %rsi, %rdi movq %rdx, %r12 testq %rdx, %rdx jne .L140 movq %rdi, %rbx .L157: sarq $2, %rax movq %rax, %r12 leaq -2(%rax), %rax movq %rax, %r13 shrq $63, %r13 addq %rax, %r13 sarq %r13 jmp .L141 .L165: subq $1, %r13 .L141: movl 0(%rbp,%r13,4), %ecx movq %r12, %rdx movq %r13, %rsi movq %rbp, %rdi call _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_ testq %r13, %r13 jne .L165 movq %rbx, %rax subq %rbp, %rax cmpq $4, %rax jle .L137 .L142: subq $4, %rbx movl (%rbx), %ecx movl 0(%rbp), %eax movl %eax, (%rbx) movq %rbx, %r12 subq %rbp, %r12 movq %r12, %rdx sarq $2, 
%rdx movl $0, %esi movq %rbp, %rdi call _ZSt13__adjust_heapIPiliN9__gnu_cxx5__ops15_Iter_less_iterEEvT_T0_S5_T1_T2_ cmpq $4, %r12 jg .L142 .L137: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L145: .cfi_restore_state cmpl %esi, %edx jge .L147 movl 0(%rbp), %eax movl %esi, 0(%rbp) movl %eax, -4(%rdi) jmp .L148 .L147: movl 0(%rbp), %eax movl %edx, 0(%rbp) movl %eax, 4(%rbp) jmp .L148 .L144: movl -4(%rdi), %esi cmpl %esi, %edx jge .L149 movl 0(%rbp), %eax movl %edx, 0(%rbp) movl %eax, 4(%rbp) jmp .L148 .L149: cmpl %esi, %eax jge .L150 movl 0(%rbp), %eax movl %esi, 0(%rbp) movl %eax, -4(%rdi) jmp .L148 .L150: movl 0(%rbp), %edx movl %eax, 0(%rbp) movl %edx, (%rcx) jmp .L148 .L166: movq %r12, %rdx movq %rdi, %rsi movq %rbx, %rdi call _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_ movq %rbx, %rax subq %rbp, %rax cmpq $64, %rax jle .L137 testq %r12, %r12 je .L157 movq %rbx, %rdi .L140: subq $1, %r12 movq %rax, %rdx sarq $2, %rdx shrq $63, %rax addq %rdx, %rax sarq %rax leaq 0(%rbp,%rax,4), %rcx leaq 4(%rbp), %rbx movl 4(%rbp), %edx movl (%rcx), %eax cmpl %eax, %edx jge .L144 movl -4(%rdi), %esi cmpl %esi, %eax jge .L145 movl 0(%rbp), %edx movl %eax, 0(%rbp) movl %edx, (%rcx) .L148: movq %rdi, %rsi .L146: movl (%rbx), %ecx movl 0(%rbp), %edx cmpl %edx, %ecx jge .L151 .L152: addq $4, %rbx movl (%rbx), %ecx cmpl %edx, %ecx jl .L152 .L151: leaq -4(%rsi), %rax movl -4(%rsi), %esi cmpl %edx, %esi jle .L153 .L154: subq $4, %rax movl (%rax), %esi cmpl %edx, %esi jg .L154 .L153: cmpq %rax, %rbx jnb .L166 movl %esi, (%rbx) movl %ecx, (%rax) addq $4, %rbx movq %rax, %rsi jmp .L146 .cfi_endproc .LFE5094: .size _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_, .-_ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_ .text .globl main .type main, @function main: 
.LFB4429: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $136, %rsp .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %fs:40, %rax movq %rax, -56(%rbp) xorl %eax, %eax leaq -124(%rbp), %rsi leaq -128(%rbp), %rdi call _Z7readIntRiS_ movl -124(%rbp), %ebx movslq %ebx, %rdi salq $3, %rdi call malloc@PLT movq %rax, -152(%rbp) movl %ebx, %edx movl -128(%rbp), %esi movq %rax, %rdi call _Z9readGraphPyii movl -128(%rbp), %esi movslq %esi, %rcx leaq 15(,%rcx,4), %rax movq %rax, %rdi andq $-16, %rdi andq $-4096, %rax movq %rsp, %rdx subq %rax, %rdx .L168: cmpq %rdx, %rsp je .L169 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L168 .L169: movq %rdi, %rax andl $4095, %eax subq %rax, %rsp testq %rax, %rax je .L170 orq $0, -8(%rsp,%rax) .L170: movq %rsp, %rbx movl -124(%rbp), %r9d movslq %r9d, %rdx salq $2, %rdx leaq 15(%rdx), %rax movq %rax, %rdi andq $-16, %rdi andq $-4096, %rax movq %rsp, %r8 subq %rax, %r8 .L171: cmpq %r8, %rsp je .L172 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L171 .L172: movq %rdi, %rax andl $4095, %eax subq %rax, %rsp testq %rax, %rax je .L173 orq $0, -8(%rsp,%rax) .L173: movq %rsp, -160(%rbp) testl %esi, %esi jle .L174 movl $0, %eax .L175: movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq %rax, %rcx jne .L175 .L174: testl %r9d, %r9d jle .L176 movq -160(%rbp), %rcx movq %rcx, %rax addq %rcx, %rdx .L177: movl $0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L177 .L176: call clock@PLT movq %rax, -168(%rbp) movl $0, %r15d leaq -112(%rbp), %r13 leaq -120(%rbp), %rax movq %rax, -136(%rbp) leaq -104(%rbp), %rax movq %rax, -144(%rbp) leaq -96(%rbp), %r12 .L185: movl $0, -60(%rbp) movslq -128(%rbp), %rsi salq $2, %rsi movq %r13, %rdi call cudaMalloc@PLT movslq -124(%rbp), %rsi salq $3, %rsi movq -136(%rbp), %rdi call cudaMalloc@PLT movslq -124(%rbp), %rsi salq $2, %rsi movq -144(%rbp), %rdi call 
cudaMalloc@PLT movl $4, %esi movq %r12, %rdi call cudaMalloc@PLT movslq -128(%rbp), %rdx salq $2, %rdx movl $1, %ecx movq %rbx, %rsi movq -112(%rbp), %rdi call cudaMemcpy@PLT movslq -124(%rbp), %rdx salq $3, %rdx movl $1, %ecx movq -152(%rbp), %rsi movq -120(%rbp), %rdi call cudaMemcpy@PLT movslq -124(%rbp), %rdx salq $2, %rdx movl $1, %ecx movq -160(%rbp), %rsi movq -104(%rbp), %rdi call cudaMemcpy@PLT leaq -60(%rbp), %rsi movl $1, %ecx movl $4, %edx movq -96(%rbp), %rdi call cudaMemcpy@PLT testl %r15d, %r15d je .L178 movl $256, -72(%rbp) movl $1, -68(%rbp) movl $1, -64(%rbp) movl $256, -84(%rbp) movl $1, -80(%rbp) movl $1, -76(%rbp) movl $0, %r9d movl $0, %r8d movq -72(%rbp), %rdx movl $1, %ecx movq -84(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L207 .L180: call cudaThreadSynchronize@PLT leaq -60(%rbp), %rdi movl $2, %ecx movl $4, %edx movq -96(%rbp), %rsi call cudaMemcpy@PLT movslq -128(%rbp), %rdx salq $2, %rdx movl $2, %ecx movq -112(%rbp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq %r13, %rdi call cudaFree@PLT movq -136(%rbp), %rdi call cudaFree@PLT movq -144(%rbp), %rdi call cudaFree@PLT movq %r12, %rdi call cudaFree@PLT cmpl $0, -60(%rbp) je .L182 xorl $1, %r15d leaq -60(%rbp), %r14 jmp .L184 .L207: movl -124(%rbp), %r8d movq -96(%rbp), %rcx movq -104(%rbp), %rdx movq -120(%rbp), %rsi movq -112(%rbp), %rdi call _Z44__device_stub__Z17select_winner_oddPiPyS_S_iPiPyS_S_i jmp .L180 .L178: movl $256, -72(%rbp) movl $1, -68(%rbp) movl $1, -64(%rbp) movl $256, -84(%rbp) movl $1, -80(%rbp) movl $1, -76(%rbp) movl $0, %r9d movl $0, %r8d movq -72(%rbp), %rdx movl $1, %ecx movq -84(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L180 movl -124(%rbp), %r8d movq -96(%rbp), %rcx movq -104(%rbp), %rdx movq -120(%rbp), %rsi movq -112(%rbp), %rdi call _Z45__device_stub__Z18select_winner_evenPiPyS_S_iPiPyS_S_i jmp .L180 .L183: call cudaThreadSynchronize@PLT movl $2, %ecx movl $4, %edx movq 
-96(%rbp), %rsi movq %r14, %rdi call cudaMemcpy@PLT movslq -128(%rbp), %rdx salq $2, %rdx movl $2, %ecx movq -112(%rbp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq %r12, %rdi call cudaFree@PLT movq %r13, %rdi call cudaFree@PLT cmpl $0, -60(%rbp) je .L185 .L184: movl $0, -60(%rbp) movl $4, %esi movq %r12, %rdi call cudaMalloc@PLT movslq -128(%rbp), %rsi salq $2, %rsi movq %r13, %rdi call cudaMalloc@PLT movl $1, %ecx movl $4, %edx movq %r14, %rsi movq -96(%rbp), %rdi call cudaMemcpy@PLT movslq -128(%rbp), %rdx salq $2, %rdx movl $1, %ecx movq %rbx, %rsi movq -112(%rbp), %rdi call cudaMemcpy@PLT movl $256, -72(%rbp) movl $1, -68(%rbp) movl $1, -64(%rbp) movl $256, -84(%rbp) movl $1, -80(%rbp) movl $1, -76(%rbp) movl $0, %r9d movl $0, %r8d movq -72(%rbp), %rdx movl $1, %ecx movq -84(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L183 movq -96(%rbp), %rdx movl -128(%rbp), %esi movq -112(%rbp), %rdi call _Z26__device_stub__Z4jumpPiiS_PiiS_ jmp .L183 .L182: call clock@PLT movq -168(%rbp), %rcx subq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2ssq %rax, %xmm0 divss .LC6(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 leaq _ZSt4cout(%rip), %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movslq -128(%rbp), %r12 salq $2, %r12 leaq (%rbx,%r12), %r13 cmpq %rbx, %r13 je .L186 movq %r12, %rdx sarq $2, %rdx movl $64, %eax je .L187 bsrq %rdx, %rax xorl $63, %eax .L187: movl $63, %edx subl %eax, %edx movslq %edx, %rdx addq %rdx, %rdx movq %r13, %rsi movq %rbx, %rdi call _ZSt16__introsort_loopIPilN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_T1_ cmpq $64, %r12 jle .L188 leaq 64(%rbx), %r12 movq %r12, %rsi movq %rbx, %rdi call _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_ cmpq %r12, %r13 je .L189 .L190: movq %r12, %rdi call _ZSt25__unguarded_linear_insertIPiN9__gnu_cxx5__ops14_Val_less_iterEEvT_T0_ addq $4, %r12 cmpq %r12, %r13 jne .L190 jmp .L189 .L188: movq %r13, %rsi 
movq %rbx, %rdi call _ZSt16__insertion_sortIPiN9__gnu_cxx5__ops15_Iter_less_iterEEvT_S4_T0_ .L189: movslq -128(%rbp), %rax leaq (%rbx,%rax,4), %rcx cmpq %rbx, %rcx je .L186 movq %rbx, %rax .L191: movq %rax, %rsi addq $4, %rax cmpq %rax, %rcx je .L208 movl (%rax), %edi cmpl %edi, -4(%rax) jne .L191 cmpq %rsi, %rcx je .L194 leaq 8(%rsi), %rax cmpq %rax, %rcx jne .L197 .L195: addq $4, %rsi .L194: subq %rbx, %rsi sarq $2, %rsi leaq _ZSt4cout(%rip), %rdi call _ZNSo9_M_insertIlEERSoT_@PLT movq -56(%rbp), %rax subq %fs:40, %rax jne .L209 movl $0, %eax leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L208: .cfi_restore_state movq %rax, %rsi jmp .L194 .L196: addq $4, %rax cmpq %rax, %rcx je .L195 .L197: movl (%rax), %edx cmpl %edx, (%rsi) je .L196 movl %edx, 4(%rsi) leaq 4(%rsi), %rsi jmp .L196 .L186: movq %rbx, %rsi jmp .L194 .L209: call __stack_chk_fail@PLT .cfi_endproc .LFE4429: .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC6: .long 1232348160 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// GPU connected components: alternating "hooking" passes (select_winner_*)
// that merge trees along edges, and pointer-"jumping" passes that flatten
// trees, until no change occurs. Edge list is read from data/graph.txt.
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>

using namespace std;

// Read vertex count n and edge count m from data/nums.txt.
void readInt(int &n, int &m) {
    ifstream fin_n("data/nums.txt");
    fin_n >> n >> m;
}

// Read m edges (1-based endpoints "u v") from data/graph.txt and pack each
// pair into one 64-bit word: high 32 bits = u, low 32 bits = v (0-based).
// n is unused but kept so the existing call sites remain valid.
void readGraph(unsigned long long *neib, int n, int m) {
    (void)n;
    ifstream fin_g("data/graph.txt");
    for (int i = 0; i < m; ++i) {
        int u, v;
        fin_g >> u >> v;
        u--, v--;
        neib[i] = ((unsigned long long)u << 32) + v;
    }
}

// Hooking pass (odd rounds): for each edge whose endpoints lie in different
// trees, hook the larger root onto the smaller and set *flag.
// Grid-stride loop so the fixed <<<256, 256>>> launch covers all e_num edges
// (the previous `if (tid < e_num)` guard silently skipped edges past 65536).
// Concurrent writes to parent[] race; this hooking scheme tolerates that —
// any one winning write makes progress and later rounds repair the rest.
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        int u = (int)(temp & 0xffffffff);
        int v = (int)(temp >> 32);
        if (parent[u] != parent[v]) {
            parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;  // edge is internal to an already-merged component
        }
    }
}

// Hooking pass (even rounds): same as the odd pass but hooks in the opposite
// direction (smaller root onto larger), which breaks hook cycles across rounds.
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        unsigned int u = (unsigned int)(temp & 0xffffffff);
        unsigned int v = (unsigned int)((temp >> 32) & 0xffffffff);
        if (parent[u] != parent[v]) {
            parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Pointer-jumping pass: replace each vertex's parent with its grandparent,
// halving tree depth; sets *flag while any tree is not yet flat.
__global__ void jump(int *parent, int v_num, int *flag) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < v_num; tid += stride) {
        int p = parent[tid];
        int p_p = parent[p];
        if (p != p_p) {
            parent[tid] = p_p;
            *flag = 1;
        }
    }
}

int main() {
    int n, m;
    readInt(n, m);

    unsigned long long *h_edge_list, *d_edge_list;
    h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long));
    readGraph(h_edge_list, n, m);

    // VLAs (GCC extension, as in the rest of this file): parent forest and
    // per-edge "internal edge" marks on the host.
    int h_parent[n], *d_parent;
    int h_mark[m], *d_mark;
    for (int i = 0; i < n; ++i) {
        h_parent[i] = i;  // every vertex starts as its own root
    }
    for (int i = 0; i < m; ++i) {
        h_mark[i] = 0;
    }

    int flag[1], *d_flag;
    int count = 0;  // parity of the hooking round (alternates hook direction)
    clock_t beg = clock();
    // The original `do { ... } while (flag);` tested the array's (always
    // non-null) decayed pointer; the loop only ever exits via the break below.
    while (true) {
        flag[0] = 0;
        cudaMalloc(&d_parent, n * sizeof(int));
        cudaMalloc(&d_edge_list, m * sizeof(unsigned long long));
        cudaMalloc(&d_mark, m * sizeof(int));
        cudaMalloc(&d_flag, sizeof(int));
        cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), cudaMemcpyHostToDevice);
        cudaMemcpy(d_mark, h_mark, m * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice);
        if (count) {
            select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        } else {
            select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        }
        cudaDeviceSynchronize();  // cudaThreadSynchronize is deprecated
        cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost);
        // BUG FIX: the original called cudaFree(&d_ptr) — freeing the address
        // of the host pointer variable, which fails and leaks every device
        // allocation each iteration. cudaFree takes the device pointer itself.
        cudaFree(d_parent);
        cudaFree(d_edge_list);
        cudaFree(d_mark);
        cudaFree(d_flag);
        if (!flag[0]) {
            break;  // no hook happened: components are final
        }
        count ^= 1;
        // Jump until every tree is flat (parent == grandparent everywhere).
        do {
            flag[0] = 0;
            cudaMalloc(&d_flag, sizeof(int));
            cudaMalloc(&d_parent, n * sizeof(int));
            cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice);
            cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice);
            jump<<<256, 256>>>(d_parent, n, d_flag);
            cudaDeviceSynchronize();
            cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost);
            cudaFree(d_flag);   // BUG FIX: was cudaFree(&d_flag)
            cudaFree(d_parent); // BUG FIX: was cudaFree(&d_parent)
        } while (flag[0]);
    }
    cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
    // Component count = number of distinct roots in the flattened forest.
    sort(h_parent, h_parent + n);
    cout << unique(h_parent, h_parent + n) - h_parent;
    free(h_edge_list);  // was leaked before
}
// GPU connected components (HIP port): alternating "hooking" passes
// (select_winner_*) that merge trees along edges, and pointer-"jumping"
// passes that flatten trees, until no change occurs.
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>

using namespace std;

// Read vertex count n and edge count m from data/nums.txt.
void readInt(int &n, int &m) {
    ifstream fin_n("data/nums.txt");
    fin_n >> n >> m;
}

// Read m edges (1-based endpoints "u v") from data/graph.txt and pack each
// pair into one 64-bit word: high 32 bits = u, low 32 bits = v (0-based).
// n is unused but kept so the existing call sites remain valid.
void readGraph(unsigned long long *neib, int n, int m) {
    (void)n;
    ifstream fin_g("data/graph.txt");
    for (int i = 0; i < m; ++i) {
        int u, v;
        fin_g >> u >> v;
        u--, v--;
        neib[i] = ((unsigned long long)u << 32) + v;
    }
}

// Hooking pass (odd rounds): for each edge whose endpoints lie in different
// trees, hook the larger root onto the smaller and set *flag.
// Grid-stride loop so the fixed <<<256, 256>>> launch covers all e_num edges
// (the previous `if (tid < e_num)` guard silently skipped edges past 65536).
// Concurrent writes to parent[] race; this hooking scheme tolerates that —
// any one winning write makes progress and later rounds repair the rest.
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        int u = (int)(temp & 0xffffffff);
        int v = (int)(temp >> 32);
        if (parent[u] != parent[v]) {
            parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;  // edge is internal to an already-merged component
        }
    }
}

// Hooking pass (even rounds): same as the odd pass but hooks in the opposite
// direction (smaller root onto larger), which breaks hook cycles across rounds.
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < e_num; tid += stride) {
        unsigned long long temp = edge_list[tid];
        unsigned int u = (unsigned int)(temp & 0xffffffff);
        unsigned int v = (unsigned int)((temp >> 32) & 0xffffffff);
        if (parent[u] != parent[v]) {
            parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
            *flag = 1;
        } else {
            mark[tid] = 1;
        }
    }
}

// Pointer-jumping pass: replace each vertex's parent with its grandparent,
// halving tree depth; sets *flag while any tree is not yet flat.
__global__ void jump(int *parent, int v_num, int *flag) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < v_num; tid += stride) {
        int p = parent[tid];
        int p_p = parent[p];
        if (p != p_p) {
            parent[tid] = p_p;
            *flag = 1;
        }
    }
}

int main() {
    int n, m;
    readInt(n, m);

    unsigned long long *h_edge_list, *d_edge_list;
    h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long));
    readGraph(h_edge_list, n, m);

    // VLAs (GCC extension, as in the rest of this file): parent forest and
    // per-edge "internal edge" marks on the host.
    int h_parent[n], *d_parent;
    int h_mark[m], *d_mark;
    for (int i = 0; i < n; ++i) {
        h_parent[i] = i;  // every vertex starts as its own root
    }
    for (int i = 0; i < m; ++i) {
        h_mark[i] = 0;
    }

    int flag[1], *d_flag;
    int count = 0;  // parity of the hooking round (alternates hook direction)
    clock_t beg = clock();
    // The original `do { ... } while (flag);` tested the array's (always
    // non-null) decayed pointer; the loop only ever exits via the break below.
    while (true) {
        flag[0] = 0;
        hipMalloc(&d_parent, n * sizeof(int));
        hipMalloc(&d_edge_list, m * sizeof(unsigned long long));
        hipMalloc(&d_mark, m * sizeof(int));
        hipMalloc(&d_flag, sizeof(int));
        hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), hipMemcpyHostToDevice);
        hipMemcpy(d_mark, h_mark, m * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice);
        if (count) {
            select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        } else {
            select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
        }
        hipDeviceSynchronize();
        hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
        hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost);
        // BUG FIX: the original called hipFree(&d_ptr) — freeing the address
        // of the host pointer variable, which fails and leaks every device
        // allocation each iteration. hipFree takes the device pointer itself.
        hipFree(d_parent);
        hipFree(d_edge_list);
        hipFree(d_mark);
        hipFree(d_flag);
        if (!flag[0]) {
            break;  // no hook happened: components are final
        }
        count ^= 1;
        // Jump until every tree is flat (parent == grandparent everywhere).
        do {
            flag[0] = 0;
            hipMalloc(&d_flag, sizeof(int));
            hipMalloc(&d_parent, n * sizeof(int));
            hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice);
            hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice);
            jump<<<256, 256>>>(d_parent, n, d_flag);
            hipDeviceSynchronize();
            hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
            hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost);
            hipFree(d_flag);   // BUG FIX: was hipFree(&d_flag)
            hipFree(d_parent); // BUG FIX: was hipFree(&d_parent)
        } while (flag[0]);
    }
    cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
    // Component count = number of distinct roots in the flattened forest.
    sort(h_parent, h_parent + n);
    cout << unique(h_parent, h_parent + n) - h_parent;
    free(h_edge_list);  // was leaked before
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <fstream> #include <vector> #include <stdio.h> #include <algorithm> #include <time.h> using namespace std; void readInt(int &n, int &m) { ifstream fin_n("data/nums.txt"); fin_n >> n >> m; } void readGraph(unsigned long long *neib, int n, int m) { ifstream fin_g("data/graph.txt"); vector<vector<int> > vert; vert.resize(n); for (int i = 0; i < m; ++i) { int u, v; fin_g >> u >> v; u--, v--; neib[i] = ((unsigned long long)u << 32) + v; } } __global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < e_num) { unsigned long long temp = edge_list[tid]; int u, v; u = temp & 0xffffffff; v = temp >> 32; if (parent[u] != parent[v]) { parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])]; *flag = 1; } else { mark[tid] = 1; } } } __global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < e_num) { unsigned long long temp = edge_list[tid]; unsigned int u, v; u = temp & 0xffffffff; v = (temp >> 32) & 0xffffffff; if (parent[u] != parent[v]) { parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])]; *flag = 1; } else { mark[tid] = 1; } } } __global__ void jump(int *parent, int v_num, int *flag) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < v_num) { int p = parent[tid]; int p_p = parent[p]; if (p != p_p) { parent[tid] = p_p; (*flag) = 1; } } } int main() { int n, m; readInt(n, m); unsigned long long *h_edge_list, *d_edge_list; h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long)); readGraph(h_edge_list, n, m); int h_parent[n], *d_parent; int h_mark[m], *d_mark; for (int i = 0; i < n; ++i) { h_parent[i] = i; } for (int i = 0; i < m; ++i) { h_mark[i] = 0; } int flag[1], *d_flag; int count = 0; clock_t beg = clock(); do { flag[0] = 0; 
hipMalloc(&d_parent, n * sizeof(int)); hipMalloc(&d_edge_list, m * sizeof(unsigned long long)); hipMalloc(&d_mark, m * sizeof(int)); hipMalloc(&d_flag, sizeof(int)); hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(d_mark, h_mark, m * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice); if (count) { select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m); } else { select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m); } hipDeviceSynchronize(); hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(&d_parent); hipFree(&d_edge_list); hipFree(&d_mark); hipFree(&d_flag); if (!flag[0]) { break; } count ^= 1; do { flag[0] = 0; hipMalloc(&d_flag, sizeof(int)); hipMalloc(&d_parent, n * sizeof(int)); hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice); jump<<<256, 256>>>(d_parent, n, d_flag); hipDeviceSynchronize(); hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(&d_flag); hipFree(&d_parent); } while(flag[0]); } while(flag); cout << float(clock() - beg) / CLOCKS_PER_SEC << endl; sort(h_parent, h_parent + n); cout << unique(h_parent, h_parent + n) - h_parent; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17select_winner_oddPiPyS_S_i .globl _Z17select_winner_oddPiPyS_S_i .p2align 8 .type _Z17select_winner_oddPiPyS_S_i,@function _Z17select_winner_oddPiPyS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_6 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s6, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo s_mov_b32 s6, exec_lo global_load_b64 v[3:4], v[3:4], off s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v6, 31, v3 v_mov_b32_e32 v5, v3 v_ashrrev_i32_e32 v8, 31, v4 v_mov_b32_e32 v7, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 2, v[5:6] v_lshlrev_b64 v[5:6], 2, v[7:8] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[3:4], off global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(0) v_cmpx_ne_u32_e64 v0, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB0_3 v_min_i32_e32 v1, v0, v3 v_max_i32_e32 v0, v0, v3 s_load_b64 s[2:3], s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v2, v[1:2], 
off v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_3: s_or_saveexec_b32 s4, s6 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v3, s2 s_xor_b32 exec_lo, exec_lo, s4 s_cbranch_execz .LBB0_5 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo .LBB0_5: s_or_b32 exec_lo, exec_lo, s4 v_mov_b32_e32 v0, 1 global_store_b32 v[3:4], v0, off .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17select_winner_oddPiPyS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 
.amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17select_winner_oddPiPyS_S_i, .Lfunc_end0-_Z17select_winner_oddPiPyS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z18select_winner_evenPiPyS_S_i .globl _Z18select_winner_evenPiPyS_S_i .p2align 8 .type _Z18select_winner_evenPiPyS_S_i,@function _Z18select_winner_evenPiPyS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB1_6 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s6, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo s_mov_b32 s6, exec_lo global_load_b64 v[3:4], v[3:4], off s_waitcnt vmcnt(0) v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v5, v3 v_lshrrev_b64 v[3:4], 30, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[5:6] v_and_b32_e32 v0, -4, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v3, v[3:4], off s_waitcnt vmcnt(0) v_cmpx_ne_u32_e64 v0, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB1_3 v_max_i32_e32 v1, v0, v3 v_min_i32_e32 v0, v0, v3 s_load_b64 s[2:3], s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 
v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v2, v[1:2], off v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB1_3: s_or_saveexec_b32 s4, s6 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v3, s2 s_xor_b32 exec_lo, exec_lo, s4 s_cbranch_execz .LBB1_5 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo .LBB1_5: s_or_b32 exec_lo, exec_lo, s4 v_mov_b32_e32 v0, 1 global_store_b32 v[3:4], v0, off .LBB1_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18select_winner_evenPiPyS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 
.amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z18select_winner_evenPiPyS_S_i, .Lfunc_end1-_Z18select_winner_evenPiPyS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z4jumpPiiS_ .globl _Z4jumpPiiS_ .p2align 8 .type _Z4jumpPiiS_,@function _Z4jumpPiiS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB2_3 s_load_b64 s[2:3], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v3, v[0:1], off s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[4:5], 2, v[3:4] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v2, v[4:5], off s_waitcnt vmcnt(0) v_cmp_ne_u32_e32 vcc_lo, v3, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB2_3 s_load_b64 s[0:1], s[0:1], 0x10 v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 1 global_store_b32 v[0:1], v2, off s_waitcnt lgkmcnt(0) global_store_b32 v3, v4, s[0:1] .LBB2_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4jumpPiiS_ .amdhsa_group_segment_fixed_size 0 
.amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z4jumpPiiS_, .Lfunc_end2-_Z4jumpPiiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - 
.offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17select_winner_oddPiPyS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17select_winner_oddPiPyS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 
.value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18select_winner_evenPiPyS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18select_winner_evenPiPyS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4jumpPiiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4jumpPiiS_.kd .uniform_work_group_size: 
1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z4jumpPiiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0205 */ /*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea4000c1e1900 */ /*00a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x004fca00078e0205 */ /*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea4000c1e1900 */ /*00c0*/ ISETP.NE.AND P0, PT, R0, R7, PT ; /* 0x000000070000720c */ /* 0x004fda0003f05270 */ /*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe200078e00ff */ /*00f0*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */ /* 0x000fe20000000f00 */ /*0100*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */ /* 0x000fe200078e00ff */ /*0110*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0120*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe2000c101904 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z18select_winner_evenPiPyS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0203 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea4000c1e1b00 */ /*00a0*/ IMAD.SHL.U32 R6, R2.reuse, 0x4, RZ ; /* 0x0000000402067824 */ /* 0x044fe200078e00ff */ /*00b0*/ SHF.R.U64 R4, R2.reuse, 0x1e, R3.reuse ; /* 0x0000001e02047819 */ /* 0x140fe40000001203 */ /*00c0*/ SHF.L.U64.HI R7, R2, 0x2, R3.reuse ; /* 0x0000000202077819 */ /* 0x100fe40000010203 */ /*00d0*/ LOP3.LUT R6, R6, 
0xfffffffc, RZ, 0xc0, !PT ; /* 0xfffffffc06067812 */ /* 0x000fe400078ec0ff */ /*00e0*/ SHF.R.U32.HI R5, RZ, 0x1e, R3 ; /* 0x0000001eff057819 */ /* 0x000fe40000011603 */ /*00f0*/ LOP3.LUT R4, R4, 0xfffffffc, RZ, 0xc0, !PT ; /* 0xfffffffc04047812 */ /* 0x000fc400078ec0ff */ /*0100*/ LOP3.LUT R7, R7, 0x3, RZ, 0xc0, !PT ; /* 0x0000000307077812 */ /* 0x000fe400078ec0ff */ /*0110*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe40007f1e0ff */ /*0120*/ LOP3.LUT R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe400078ec0ff */ /*0130*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fe40007f3e0ff */ /*0140*/ IADD3.X R7, R7, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590007077a10 */ /* 0x000fe400007fe4ff */ /*0150*/ IADD3.X R5, R5, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590005057a10 */ /* 0x000fc80000ffe4ff */ /*0160*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea8000c1e1900 */ /*0170*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0180*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fe40000011400 */ /*0190*/ ISETP.NE.AND P0, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f05270 */ /*01a0*/ @!P0 BRA 0x270 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*01b0*/ IMNMX R2, R4, R7, !PT ; /* 0x0000000704027217 */ /* 0x000fe20007800200 */ /*01c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*01d0*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*01e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*01f0*/ IMNMX R4, R4, R7, PT ; /* 0x0000000704047217 */ /* 0x000fe20003800200 */ /*0200*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe400078e00ff */ /*0210*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe400078e00ff */ /*0220*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 
0x000fc800078e0205 */ /*0230*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */ /* 0x000fe200078e00ff */ /*0240*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe8000c101904 */ /*0250*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0260*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0270*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fe200078010ff */ /*0280*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */ /* 0x000fc600078e00ff */ /*0290*/ LEA.HI.X R3, R0, c[0x0][0x174], R3, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x000fca00000f1403 */ /*02a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*02b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02c0*/ BRA 0x2c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z17select_winner_oddPiPyS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0203 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea4000c1e1b00 */ /*00a0*/ SHF.R.S64 R6, RZ, 0x1e, R2 ; /* 0x0000001eff067819 */ /* 0x004fe40000001002 */ /*00b0*/ SHF.R.S32.HI R5, RZ, 0x1f, R3 ; /* 0x0000001fff057819 */ /* 0x000fe40000011403 */ /*00c0*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe40007f1e0ff */ /*00d0*/ LEA R4, P1, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003047a11 */ /* 0x000fe400078210ff */ /*00e0*/ LEA.HI.X.SX32 R7, R2, c[0x0][0x164], 0x2, P0 ; /* 0x0000590002077a11 */ /* 0x000fc400000f16ff */ /*00f0*/ LEA.HI.X R5, R3, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590003057a11 */ /* 0x000fc800008f1405 */ /*0100*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ea8000c1e1900 */ /*0110*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0120*/ SHF.R.S32.HI R9, RZ, 0x1f, R0 ; /* 0x0000001fff097819 */ /* 0x000fe40000011400 */ /*0130*/ ISETP.NE.AND P0, PT, R7, R4, PT ; /* 0x000000040700720c */ /* 0x004fda0003f05270 */ /*0140*/ @!P0 BRA 0x210 ; /* 
0x000000c000008947 */ /* 0x000fea0003800000 */ /*0150*/ IMNMX R2, R4, R7, PT ; /* 0x0000000704027217 */ /* 0x000fe20003800200 */ /*0160*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*0170*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*0180*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0190*/ IMNMX R4, R4, R7, !PT ; /* 0x0000000704047217 */ /* 0x000fe20007800200 */ /*01a0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */ /* 0x000fe400078e00ff */ /*01b0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */ /* 0x000fe400078e00ff */ /*01c0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fc800078e0205 */ /*01d0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */ /* 0x000fe200078e00ff */ /*01e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe8000c101904 */ /*01f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0200*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0210*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fe200078010ff */ /*0220*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */ /* 0x000fc600078e00ff */ /*0230*/ LEA.HI.X R3, R0, c[0x0][0x174], R9, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x000fca00000f1409 */ /*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17select_winner_oddPiPyS_S_i .globl _Z17select_winner_oddPiPyS_S_i .p2align 8 .type _Z17select_winner_oddPiPyS_S_i,@function _Z17select_winner_oddPiPyS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_6 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s6, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo s_mov_b32 s6, exec_lo global_load_b64 v[3:4], v[3:4], off s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v6, 31, v3 v_mov_b32_e32 v5, v3 v_ashrrev_i32_e32 v8, 31, v4 v_mov_b32_e32 v7, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 2, v[5:6] v_lshlrev_b64 v[5:6], 2, v[7:8] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_clause 0x1 global_load_b32 v0, v[3:4], off global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(0) v_cmpx_ne_u32_e64 v0, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB0_3 v_min_i32_e32 v1, v0, v3 v_max_i32_e32 v0, v0, v3 s_load_b64 s[2:3], s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v2, v[1:2], 
off v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_3: s_or_saveexec_b32 s4, s6 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v3, s2 s_xor_b32 exec_lo, exec_lo, s4 s_cbranch_execz .LBB0_5 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo .LBB0_5: s_or_b32 exec_lo, exec_lo, s4 v_mov_b32_e32 v0, 1 global_store_b32 v[3:4], v0, off .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17select_winner_oddPiPyS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 
.amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17select_winner_oddPiPyS_S_i, .Lfunc_end0-_Z17select_winner_oddPiPyS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z18select_winner_evenPiPyS_S_i .globl _Z18select_winner_evenPiPyS_S_i .p2align 8 .type _Z18select_winner_evenPiPyS_S_i,@function _Z18select_winner_evenPiPyS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB1_6 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s6, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo s_mov_b32 s6, exec_lo global_load_b64 v[3:4], v[3:4], off s_waitcnt vmcnt(0) v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v5, v3 v_lshrrev_b64 v[3:4], 30, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[5:6] v_and_b32_e32 v0, -4, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v3, v[3:4], off s_waitcnt vmcnt(0) v_cmpx_ne_u32_e64 v0, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB1_3 v_max_i32_e32 v1, v0, v3 v_min_i32_e32 v0, v0, v3 s_load_b64 s[2:3], s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 
v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v2, v[1:2], off v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB1_3: s_or_saveexec_b32 s4, s6 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v3, s2 s_xor_b32 exec_lo, exec_lo, s4 s_cbranch_execz .LBB1_5 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo .LBB1_5: s_or_b32 exec_lo, exec_lo, s4 v_mov_b32_e32 v0, 1 global_store_b32 v[3:4], v0, off .LBB1_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18select_winner_evenPiPyS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 
.amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z18select_winner_evenPiPyS_S_i, .Lfunc_end1-_Z18select_winner_evenPiPyS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z4jumpPiiS_ .globl _Z4jumpPiiS_ .p2align 8 .type _Z4jumpPiiS_,@function _Z4jumpPiiS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB2_3 s_load_b64 s[2:3], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v3, v[0:1], off s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[4:5], 2, v[3:4] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v2, v[4:5], off s_waitcnt vmcnt(0) v_cmp_ne_u32_e32 vcc_lo, v3, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB2_3 s_load_b64 s[0:1], s[0:1], 0x10 v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 1 global_store_b32 v[0:1], v2, off s_waitcnt lgkmcnt(0) global_store_b32 v3, v4, s[0:1] .LBB2_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4jumpPiiS_ .amdhsa_group_segment_fixed_size 0 
.amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z4jumpPiiS_, .Lfunc_end2-_Z4jumpPiiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - 
.offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17select_winner_oddPiPyS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17select_winner_oddPiPyS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 
.value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18select_winner_evenPiPyS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18select_winner_evenPiPyS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4jumpPiiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4jumpPiiS_.kd .uniform_work_group_size: 
1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <algorithm> #include <list> #include <vector> #include <iterator> #include <functional> #include <time.h> #include <chrono> #include <cstdlib> using namespace std; struct triple { long long int set; //Set denotes which Elements are in the Subset long int w; //Weight of the Triple //d long int p; //Profit of the Triple //d //Struct Constructor triple() : set(0), w(0.0), p(0.0) {} //Comparison Operator Overloadings bool operator< (const triple &t) const { return (w < t.w); } bool operator> (const triple &t) const { return (w > t.w); } }; void merge_lists(vector<triple> &A, vector<triple> &B, vector< pair<long long int, long long int> > &V) { vector<triple> T_p, Tcopy; triple t; long long int v1s = V.size() >> 1, v2s = V.size() - v1s; //Initialisation for A t.set = 0, t.w = t.p = 0; A.push_back(t); //Sort A in Non-Increasing Order for (long long int i = 0; i < v1s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)A.size(); ++j) { t.set = A[j].set + (1 << i); t.w = A[j].w + V[i].first; t.p = A[j].p + V[i].second; T_p.push_back(t); } //Merge A, T_p merge(A.begin(), A.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy)); A = Tcopy; } //Initialisation for B t.set = 0, t.w = t.p = 0; B.push_back(t); //Sort B in Non-Increasing Order for (long long int i = 0; i < v2s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)B.size(); ++j) { t.set = B[j].set + (1 << i); t.w = B[j].w + V[i + v1s].first; t.p = B[j].p + V[i + v1s].second; T_p.push_back(t); } //Merge B, T_p merge(B.begin(), B.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy), greater<struct triple>()); B = Tcopy; } } void maxScan(vector<triple> &B, vector< pair<int, long int> > &maxB) { long int Bsize = B.size(); maxB[Bsize - 1].first = B[Bsize - 1].p; maxB[Bsize - 1].second = Bsize - 1; for (long int i = Bsize - 2; i >= 0; 
i--) { if (B[i].p>maxB[i + 1].first) { maxB[i].first = B[i].p; maxB[i].second = i; } else { maxB[i].first = maxB[i + 1].first; maxB[i].second = maxB[i + 1].second; } } } long int generate_sets(vector<triple> &A, vector<triple> &B, const int &c, vector< pair<int, long long int> > &maxB, long int N) { int bestValue = 0; pair<long long int, long long int> bestSet; long long int i = 0, j = 0; while (i < N && j < N) { if (A[i].w + B[j].w > c) { ++j; if (j == N) break; else continue; } if (A[i].p + maxB[j].first > bestValue) { bestValue = A[i].p + maxB[j].first; bestSet = make_pair(A[i].set, maxB[j].second); } ++i; } return bestValue; } void dp_knapSack(long long int W, double wt[], double val[], long long int n) { long long int i, w; vector< vector<double> > K(n + 1, vector<double>(W + 1)); // Build table K[][] in bottom up manner for (i = 0; i <= n; i++) { for (w = 0; w <= W; w++) { if (i == 0 || w == 0) K[i][w] = 0; else if (wt[i - 1] <= w) K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w]); else K[i][w] = K[i - 1][w]; } } cout << "\n\n\tBest_DP: " << K[n][W] << endl; } //Input : Sorted Lists -> (A, B) //Output : Partitioned Sorted Lists -> (Ak, Bk) with N/k elements each void list_to_blocks(vector<triple> &A, vector<triple> &B, vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, int k) { long long int e = A.size() / k, i; vector<triple>::iterator Ait, Bit; Ait = A.begin(), Bit = B.begin(); //#pragma omp parallel for shared(A, B, Ak, Bk, e, k) private(i, Ait, Bit) for (i = 0; i < k; ++i) { Ait = A.begin() + i * e; Bit = B.begin() + i * e; copy(Ait, Ait + e, back_inserter(Ak[i])); copy(Bit, Bit + e, back_inserter(Bk[i])); } } //Input : Partitioned Sorted Lists -> (Ak, Bk) //Output : Maximum Profit of Blocks -> (maxAi, maxBi) void fsave_max_val(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, vector<double> &maxA, vector<double> &maxB) { //Needs to be dynamic if not equally partitioned (if N/k not an int) long long int e = maxA.size(), 
i, j; double Amax, Bmax; #pragma omp parallel for shared(Ak, Bk, maxA, maxB) private(e, i, j, Amax, Bmax) //Here for (i = 0; i < e; ++i) { Amax = Ak[i][0].p; Bmax = Bk[i][0].p; //Perform Parallel Max Search for Better Result for (j = 1; j < Ak[i].size(); ++j) { Amax = (Amax < Ak[i][j].p) ? Ak[i][j].p : Amax; Bmax = (Bmax < Bk[i][j].p) ? Bk[i][j].p : Bmax; } maxA[i] = Amax; maxB[i] = Bmax; } } //Input : Ak, Bk, maxAi, maxBi //Output : Blocks that are within Capacity c void prune(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector<double> &maxA, vector<double> &maxB, vector< vector<int> > &candidate, double &bestValue) { int Z, Y; int i, j, k = Ak.size(), e = Ak[0].size(); vector<int> maxValue(k); #pragma omp parallel for reduction(max:bestValue) shared(Ak, Bk, maxA, maxB, maxValue, candidate) private(i, j, Z, Y,c, e) for (i = 0; i < k; ++i) { maxValue[i] = 0; for (j = 0; j < k; ++j) //Here - will lead to CR { Z = Ak[i][0].w + Bk[j][e - 1].w; Y = Ak[i][e - 1].w + Bk[j][0].w; if (Y <= c) { if (maxA[i] + maxB[j] > maxValue[i]) maxValue[i] = maxA[i] + maxB[j]; if (bestValue<maxValue[i]) //Here bestValue = maxValue[i]; } else if (Z <= c && Y > c) candidate[i].push_back(j); // here make copy of block bk[j] } } } //Input : Candidate Block Pairs -> candidate //Output : (Max[i][j][t], L[j][t]) with reference to candidate[i] void ssave_max_val(vector< vector<triple> > &Bk, vector< vector< vector< pair<double, long long int> > > > &Max, vector< vector<int> > &candidate, double &bestValue) { int i, t, l, k = Bk.size(); int j, e = Bk[0].size(); #pragma omp parallel for shared(Bk,candidate,Max) private(i,j,t,l,k,e) //Here for (i = 0; i < k; ++i) { for (t = 0; t < candidate[i].size(); ++t) { //l is the Index of The Block Partition B of the Candidate Block Pair (Bk[l]) l = candidate[i][t]; //Initialise Last Element and Index Max[i][e - 1][t].first = Bk[l][e - 1].p; Max[i][e - 1][t].second = e - 1; //Reverse Inclusive Max-Scan for (j = e - 2; j > -1; --j) { 
if (Bk[l][j].p > Max[i][j + 1][t].first) { Max[i][j][t].first = Bk[l][j].p; Max[i][j][t].second = j; } else { Max[i][j][t].first = Max[i][j + 1][t].first; Max[i][j][t].second = Max[i][j + 1][t].second; } } } } } //Input : candidate, Max //Output : Best Value void par_search(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector< vector<int> > &candidate, vector< vector< vector< pair<double, long long int> > > > &Max, double &bestValue) { int i, j, t, l, k = Ak.size(); long long int e = Ak[0].size(), X, Y; vector<double> maxValue(k); vector< pair<long long int, long long int> > Xi(k); //Xi -> (Index ID of Subset A, Index ID of Subset B) #pragma omp parallel for shared(Ak, Bk, candidate, Max, Xi, maxValue) private(i, j, t, l, X, Y, e,k) //Here for (i = 0; i < k; ++i) { maxValue[i]=0; Xi[i].first = 0, Xi[i].second = 0;// Here for (t = 0; t < candidate[i].size(); ++t) { l = candidate[i][t]; X = 0, Y = 0; while (X < e && Y < e) { if (Ak[i][X].w + Bk[l][Y].w > c) { ++Y; continue; } else if (Ak[i][X].p + Max[i][Y][t].first > maxValue[i]) { maxValue[i] = Ak[i][X].p + Max[i][Y][t].first; Xi[i].first = Ak[i][X].set; Xi[i].second = Bk[l][Max[i][Y][t].second].set; } ++X; } } } //Evaluate Maximum Profit from max(maxValue[i]) long long int X1 = Xi[0].first, X2 = Xi[0].second; for (i = 0; i < k; ++i) if (bestValue < maxValue[i]) { bestValue = maxValue[i]; X1 = Xi[i].first; X2 = Xi[i].second; } //The Subset ID from A, Subset ID from B which gives Maximum Profit (Best Value) cout << "\n\tSubsets : " << X1 << ", " << X2 << endl; cout << "\tBestvalue : " << bestValue << endl; } int main() { //Input Data int c = 0; vector< pair<long long int, long long int> > V; vector<double> wt_arr, p_arr; srand(time(0)); //Number of Items int num_items = 10; //Input Data for (int i = 0; i < num_items; ++i) { double wt = rand() % (long int)1e7; double p = rand() % (long int)1e7; c += wt; V.push_back(make_pair(wt, p)); wt_arr.push_back(wt); p_arr.push_back(p); } //Set capacity c 
/= 2; printf("\n\tCapacity = %d\n", c); //Computation & Timing auto start = chrono::steady_clock::now(); /* [Ak, Bk] -> Ak has k Blocks with N/k elements each [maxA, maxB] -> maxI has one Element for each Block of List I [candidate] -> candidate[i] is a Vector of Blocks of Bk, which are candidate solutions with Ak[i] [Max[i][j][t], L[j][t]] -> Pair of Maximum Profit & Respective Index with reference to candidate[i] */ int k = 4; //Number of Partitions long long int N = 1 <<( num_items >> 12); //Number of Subsets long long int e = N / k; //Number of Elements per Subset double bestValue = -1; //d vector<triple> A, B; vector< vector<triple> > Ak(k, vector<triple>()); vector< vector<triple> > Bk(k, vector<triple>()); vector<double> maxA(k), maxB(k); vector< vector<int> > candidate(k); vector< vector< vector< pair<double, long long int> > > > Max(k, vector< vector< pair<double, long long int> > >(e, vector< pair<double, long long int> >(2))); merge_lists(A, B, V); //Currently Serial Merging list_to_blocks(A, B, Ak, Bk, k); //Partition Lists to Blocks fsave_max_val(Ak, Bk, maxA, maxB); //Save prune(Ak, Bk, c, maxA, maxB, candidate, bestValue); ssave_max_val(Bk, Max, candidate, bestValue); //par_search(Ak, Bk, c, candidate, Max, bestValue); auto stop = chrono::steady_clock::now(); cout << "\n Computational Time (Parallel) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; //Time the Serial DP Approach start = chrono::steady_clock::now(); //dp_knapSack(c, &wt_arr[0], &p_arr[0], V.size()); stop = chrono::steady_clock::now(); cout << "\n Computational Time (DP Serial) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; cin.get(); return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <algorithm> #include <list> #include <vector> #include <iterator> #include <functional> #include <time.h> #include <chrono> #include <cstdlib> using namespace std; struct triple { long long int set; //Set denotes which Elements are in the Subset long int w; //Weight of the Triple //d long int p; //Profit of the Triple //d //Struct Constructor triple() : set(0), w(0.0), p(0.0) {} //Comparison Operator Overloadings bool operator< (const triple &t) const { return (w < t.w); } bool operator> (const triple &t) const { return (w > t.w); } }; void merge_lists(vector<triple> &A, vector<triple> &B, vector< pair<long long int, long long int> > &V) { vector<triple> T_p, Tcopy; triple t; long long int v1s = V.size() >> 1, v2s = V.size() - v1s; //Initialisation for A t.set = 0, t.w = t.p = 0; A.push_back(t); //Sort A in Non-Increasing Order for (long long int i = 0; i < v1s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)A.size(); ++j) { t.set = A[j].set + (1 << i); t.w = A[j].w + V[i].first; t.p = A[j].p + V[i].second; T_p.push_back(t); } //Merge A, T_p merge(A.begin(), A.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy)); A = Tcopy; } //Initialisation for B t.set = 0, t.w = t.p = 0; B.push_back(t); //Sort B in Non-Increasing Order for (long long int i = 0; i < v2s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)B.size(); ++j) { t.set = B[j].set + (1 << i); t.w = B[j].w + V[i + v1s].first; t.p = B[j].p + V[i + v1s].second; T_p.push_back(t); } //Merge B, T_p merge(B.begin(), B.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy), greater<struct triple>()); B = Tcopy; } } void maxScan(vector<triple> &B, vector< pair<int, long int> > &maxB) { long int Bsize = B.size(); maxB[Bsize - 1].first = B[Bsize - 1].p; maxB[Bsize - 1].second = Bsize - 1; for (long int i = Bsize - 2; i >= 0; 
i--) { if (B[i].p>maxB[i + 1].first) { maxB[i].first = B[i].p; maxB[i].second = i; } else { maxB[i].first = maxB[i + 1].first; maxB[i].second = maxB[i + 1].second; } } } long int generate_sets(vector<triple> &A, vector<triple> &B, const int &c, vector< pair<int, long long int> > &maxB, long int N) { int bestValue = 0; pair<long long int, long long int> bestSet; long long int i = 0, j = 0; while (i < N && j < N) { if (A[i].w + B[j].w > c) { ++j; if (j == N) break; else continue; } if (A[i].p + maxB[j].first > bestValue) { bestValue = A[i].p + maxB[j].first; bestSet = make_pair(A[i].set, maxB[j].second); } ++i; } return bestValue; } void dp_knapSack(long long int W, double wt[], double val[], long long int n) { long long int i, w; vector< vector<double> > K(n + 1, vector<double>(W + 1)); // Build table K[][] in bottom up manner for (i = 0; i <= n; i++) { for (w = 0; w <= W; w++) { if (i == 0 || w == 0) K[i][w] = 0; else if (wt[i - 1] <= w) K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w]); else K[i][w] = K[i - 1][w]; } } cout << "\n\n\tBest_DP: " << K[n][W] << endl; } //Input : Sorted Lists -> (A, B) //Output : Partitioned Sorted Lists -> (Ak, Bk) with N/k elements each void list_to_blocks(vector<triple> &A, vector<triple> &B, vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, int k) { long long int e = A.size() / k, i; vector<triple>::iterator Ait, Bit; Ait = A.begin(), Bit = B.begin(); //#pragma omp parallel for shared(A, B, Ak, Bk, e, k) private(i, Ait, Bit) for (i = 0; i < k; ++i) { Ait = A.begin() + i * e; Bit = B.begin() + i * e; copy(Ait, Ait + e, back_inserter(Ak[i])); copy(Bit, Bit + e, back_inserter(Bk[i])); } } //Input : Partitioned Sorted Lists -> (Ak, Bk) //Output : Maximum Profit of Blocks -> (maxAi, maxBi) void fsave_max_val(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, vector<double> &maxA, vector<double> &maxB) { //Needs to be dynamic if not equally partitioned (if N/k not an int) long long int e = maxA.size(), 
i, j; double Amax, Bmax; #pragma omp parallel for shared(Ak, Bk, maxA, maxB) private(e, i, j, Amax, Bmax) //Here for (i = 0; i < e; ++i) { Amax = Ak[i][0].p; Bmax = Bk[i][0].p; //Perform Parallel Max Search for Better Result for (j = 1; j < Ak[i].size(); ++j) { Amax = (Amax < Ak[i][j].p) ? Ak[i][j].p : Amax; Bmax = (Bmax < Bk[i][j].p) ? Bk[i][j].p : Bmax; } maxA[i] = Amax; maxB[i] = Bmax; } } //Input : Ak, Bk, maxAi, maxBi //Output : Blocks that are within Capacity c void prune(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector<double> &maxA, vector<double> &maxB, vector< vector<int> > &candidate, double &bestValue) { int Z, Y; int i, j, k = Ak.size(), e = Ak[0].size(); vector<int> maxValue(k); #pragma omp parallel for reduction(max:bestValue) shared(Ak, Bk, maxA, maxB, maxValue, candidate) private(i, j, Z, Y,c, e) for (i = 0; i < k; ++i) { maxValue[i] = 0; for (j = 0; j < k; ++j) //Here - will lead to CR { Z = Ak[i][0].w + Bk[j][e - 1].w; Y = Ak[i][e - 1].w + Bk[j][0].w; if (Y <= c) { if (maxA[i] + maxB[j] > maxValue[i]) maxValue[i] = maxA[i] + maxB[j]; if (bestValue<maxValue[i]) //Here bestValue = maxValue[i]; } else if (Z <= c && Y > c) candidate[i].push_back(j); // here make copy of block bk[j] } } } //Input : Candidate Block Pairs -> candidate //Output : (Max[i][j][t], L[j][t]) with reference to candidate[i] void ssave_max_val(vector< vector<triple> > &Bk, vector< vector< vector< pair<double, long long int> > > > &Max, vector< vector<int> > &candidate, double &bestValue) { int i, t, l, k = Bk.size(); int j, e = Bk[0].size(); #pragma omp parallel for shared(Bk,candidate,Max) private(i,j,t,l,k,e) //Here for (i = 0; i < k; ++i) { for (t = 0; t < candidate[i].size(); ++t) { //l is the Index of The Block Partition B of the Candidate Block Pair (Bk[l]) l = candidate[i][t]; //Initialise Last Element and Index Max[i][e - 1][t].first = Bk[l][e - 1].p; Max[i][e - 1][t].second = e - 1; //Reverse Inclusive Max-Scan for (j = e - 2; j > -1; --j) { 
if (Bk[l][j].p > Max[i][j + 1][t].first) { Max[i][j][t].first = Bk[l][j].p; Max[i][j][t].second = j; } else { Max[i][j][t].first = Max[i][j + 1][t].first; Max[i][j][t].second = Max[i][j + 1][t].second; } } } } } //Input : candidate, Max //Output : Best Value void par_search(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector< vector<int> > &candidate, vector< vector< vector< pair<double, long long int> > > > &Max, double &bestValue) { int i, j, t, l, k = Ak.size(); long long int e = Ak[0].size(), X, Y; vector<double> maxValue(k); vector< pair<long long int, long long int> > Xi(k); //Xi -> (Index ID of Subset A, Index ID of Subset B) #pragma omp parallel for shared(Ak, Bk, candidate, Max, Xi, maxValue) private(i, j, t, l, X, Y, e,k) //Here for (i = 0; i < k; ++i) { maxValue[i]=0; Xi[i].first = 0, Xi[i].second = 0;// Here for (t = 0; t < candidate[i].size(); ++t) { l = candidate[i][t]; X = 0, Y = 0; while (X < e && Y < e) { if (Ak[i][X].w + Bk[l][Y].w > c) { ++Y; continue; } else if (Ak[i][X].p + Max[i][Y][t].first > maxValue[i]) { maxValue[i] = Ak[i][X].p + Max[i][Y][t].first; Xi[i].first = Ak[i][X].set; Xi[i].second = Bk[l][Max[i][Y][t].second].set; } ++X; } } } //Evaluate Maximum Profit from max(maxValue[i]) long long int X1 = Xi[0].first, X2 = Xi[0].second; for (i = 0; i < k; ++i) if (bestValue < maxValue[i]) { bestValue = maxValue[i]; X1 = Xi[i].first; X2 = Xi[i].second; } //The Subset ID from A, Subset ID from B which gives Maximum Profit (Best Value) cout << "\n\tSubsets : " << X1 << ", " << X2 << endl; cout << "\tBestvalue : " << bestValue << endl; } int main() { //Input Data int c = 0; vector< pair<long long int, long long int> > V; vector<double> wt_arr, p_arr; srand(time(0)); //Number of Items int num_items = 10; //Input Data for (int i = 0; i < num_items; ++i) { double wt = rand() % (long int)1e7; double p = rand() % (long int)1e7; c += wt; V.push_back(make_pair(wt, p)); wt_arr.push_back(wt); p_arr.push_back(p); } //Set capacity c 
/= 2; printf("\n\tCapacity = %d\n", c); //Computation & Timing auto start = chrono::steady_clock::now(); /* [Ak, Bk] -> Ak has k Blocks with N/k elements each [maxA, maxB] -> maxI has one Element for each Block of List I [candidate] -> candidate[i] is a Vector of Blocks of Bk, which are candidate solutions with Ak[i] [Max[i][j][t], L[j][t]] -> Pair of Maximum Profit & Respective Index with reference to candidate[i] */ int k = 4; //Number of Partitions long long int N = 1 <<( num_items >> 12); //Number of Subsets long long int e = N / k; //Number of Elements per Subset double bestValue = -1; //d vector<triple> A, B; vector< vector<triple> > Ak(k, vector<triple>()); vector< vector<triple> > Bk(k, vector<triple>()); vector<double> maxA(k), maxB(k); vector< vector<int> > candidate(k); vector< vector< vector< pair<double, long long int> > > > Max(k, vector< vector< pair<double, long long int> > >(e, vector< pair<double, long long int> >(2))); merge_lists(A, B, V); //Currently Serial Merging list_to_blocks(A, B, Ak, Bk, k); //Partition Lists to Blocks fsave_max_val(Ak, Bk, maxA, maxB); //Save prune(Ak, Bk, c, maxA, maxB, candidate, bestValue); ssave_max_val(Bk, Max, candidate, bestValue); //par_search(Ak, Bk, c, candidate, Max, bestValue); auto stop = chrono::steady_clock::now(); cout << "\n Computational Time (Parallel) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; //Time the Serial DP Approach start = chrono::steady_clock::now(); //dp_knapSack(c, &wt_arr[0], &p_arr[0], V.size()); stop = chrono::steady_clock::now(); cout << "\n Computational Time (DP Serial) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; cin.get(); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <algorithm> #include <list> #include <vector> #include <iterator> #include <functional> #include <time.h> #include <chrono> #include <cstdlib> using namespace std; struct triple { long long int set; //Set denotes which Elements are in the Subset long int w; //Weight of the Triple //d long int p; //Profit of the Triple //d //Struct Constructor triple() : set(0), w(0.0), p(0.0) {} //Comparison Operator Overloadings bool operator< (const triple &t) const { return (w < t.w); } bool operator> (const triple &t) const { return (w > t.w); } }; void merge_lists(vector<triple> &A, vector<triple> &B, vector< pair<long long int, long long int> > &V) { vector<triple> T_p, Tcopy; triple t; long long int v1s = V.size() >> 1, v2s = V.size() - v1s; //Initialisation for A t.set = 0, t.w = t.p = 0; A.push_back(t); //Sort A in Non-Increasing Order for (long long int i = 0; i < v1s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)A.size(); ++j) { t.set = A[j].set + (1 << i); t.w = A[j].w + V[i].first; t.p = A[j].p + V[i].second; T_p.push_back(t); } //Merge A, T_p merge(A.begin(), A.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy)); A = Tcopy; } //Initialisation for B t.set = 0, t.w = t.p = 0; B.push_back(t); //Sort B in Non-Increasing Order for (long long int i = 0; i < v2s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)B.size(); ++j) { t.set = B[j].set + (1 << i); t.w = B[j].w + V[i + v1s].first; t.p = B[j].p + V[i + v1s].second; T_p.push_back(t); } //Merge B, T_p merge(B.begin(), B.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy), greater<struct triple>()); B = Tcopy; } } void maxScan(vector<triple> &B, vector< pair<int, long int> > &maxB) { long int Bsize = B.size(); maxB[Bsize - 1].first = B[Bsize - 1].p; maxB[Bsize - 1].second = Bsize - 1; for (long 
int i = Bsize - 2; i >= 0; i--) { if (B[i].p>maxB[i + 1].first) { maxB[i].first = B[i].p; maxB[i].second = i; } else { maxB[i].first = maxB[i + 1].first; maxB[i].second = maxB[i + 1].second; } } } long int generate_sets(vector<triple> &A, vector<triple> &B, const int &c, vector< pair<int, long long int> > &maxB, long int N) { int bestValue = 0; pair<long long int, long long int> bestSet; long long int i = 0, j = 0; while (i < N && j < N) { if (A[i].w + B[j].w > c) { ++j; if (j == N) break; else continue; } if (A[i].p + maxB[j].first > bestValue) { bestValue = A[i].p + maxB[j].first; bestSet = make_pair(A[i].set, maxB[j].second); } ++i; } return bestValue; } void dp_knapSack(long long int W, double wt[], double val[], long long int n) { long long int i, w; vector< vector<double> > K(n + 1, vector<double>(W + 1)); // Build table K[][] in bottom up manner for (i = 0; i <= n; i++) { for (w = 0; w <= W; w++) { if (i == 0 || w == 0) K[i][w] = 0; else if (wt[i - 1] <= w) K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w]); else K[i][w] = K[i - 1][w]; } } cout << "\n\n\tBest_DP: " << K[n][W] << endl; } //Input : Sorted Lists -> (A, B) //Output : Partitioned Sorted Lists -> (Ak, Bk) with N/k elements each void list_to_blocks(vector<triple> &A, vector<triple> &B, vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, int k) { long long int e = A.size() / k, i; vector<triple>::iterator Ait, Bit; Ait = A.begin(), Bit = B.begin(); //#pragma omp parallel for shared(A, B, Ak, Bk, e, k) private(i, Ait, Bit) for (i = 0; i < k; ++i) { Ait = A.begin() + i * e; Bit = B.begin() + i * e; copy(Ait, Ait + e, back_inserter(Ak[i])); copy(Bit, Bit + e, back_inserter(Bk[i])); } } //Input : Partitioned Sorted Lists -> (Ak, Bk) //Output : Maximum Profit of Blocks -> (maxAi, maxBi) void fsave_max_val(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, vector<double> &maxA, vector<double> &maxB) { //Needs to be dynamic if not equally partitioned (if N/k not an int) long 
long int e = maxA.size(), i, j; double Amax, Bmax; #pragma omp parallel for shared(Ak, Bk, maxA, maxB) private(e, i, j, Amax, Bmax) //Here for (i = 0; i < e; ++i) { Amax = Ak[i][0].p; Bmax = Bk[i][0].p; //Perform Parallel Max Search for Better Result for (j = 1; j < Ak[i].size(); ++j) { Amax = (Amax < Ak[i][j].p) ? Ak[i][j].p : Amax; Bmax = (Bmax < Bk[i][j].p) ? Bk[i][j].p : Bmax; } maxA[i] = Amax; maxB[i] = Bmax; } } //Input : Ak, Bk, maxAi, maxBi //Output : Blocks that are within Capacity c void prune(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector<double> &maxA, vector<double> &maxB, vector< vector<int> > &candidate, double &bestValue) { int Z, Y; int i, j, k = Ak.size(), e = Ak[0].size(); vector<int> maxValue(k); #pragma omp parallel for reduction(max:bestValue) shared(Ak, Bk, maxA, maxB, maxValue, candidate) private(i, j, Z, Y,c, e) for (i = 0; i < k; ++i) { maxValue[i] = 0; for (j = 0; j < k; ++j) //Here - will lead to CR { Z = Ak[i][0].w + Bk[j][e - 1].w; Y = Ak[i][e - 1].w + Bk[j][0].w; if (Y <= c) { if (maxA[i] + maxB[j] > maxValue[i]) maxValue[i] = maxA[i] + maxB[j]; if (bestValue<maxValue[i]) //Here bestValue = maxValue[i]; } else if (Z <= c && Y > c) candidate[i].push_back(j); // here make copy of block bk[j] } } } //Input : Candidate Block Pairs -> candidate //Output : (Max[i][j][t], L[j][t]) with reference to candidate[i] void ssave_max_val(vector< vector<triple> > &Bk, vector< vector< vector< pair<double, long long int> > > > &Max, vector< vector<int> > &candidate, double &bestValue) { int i, t, l, k = Bk.size(); int j, e = Bk[0].size(); #pragma omp parallel for shared(Bk,candidate,Max) private(i,j,t,l,k,e) //Here for (i = 0; i < k; ++i) { for (t = 0; t < candidate[i].size(); ++t) { //l is the Index of The Block Partition B of the Candidate Block Pair (Bk[l]) l = candidate[i][t]; //Initialise Last Element and Index Max[i][e - 1][t].first = Bk[l][e - 1].p; Max[i][e - 1][t].second = e - 1; //Reverse Inclusive Max-Scan for 
(j = e - 2; j > -1; --j) { if (Bk[l][j].p > Max[i][j + 1][t].first) { Max[i][j][t].first = Bk[l][j].p; Max[i][j][t].second = j; } else { Max[i][j][t].first = Max[i][j + 1][t].first; Max[i][j][t].second = Max[i][j + 1][t].second; } } } } } //Input : candidate, Max //Output : Best Value void par_search(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector< vector<int> > &candidate, vector< vector< vector< pair<double, long long int> > > > &Max, double &bestValue) { int i, j, t, l, k = Ak.size(); long long int e = Ak[0].size(), X, Y; vector<double> maxValue(k); vector< pair<long long int, long long int> > Xi(k); //Xi -> (Index ID of Subset A, Index ID of Subset B) #pragma omp parallel for shared(Ak, Bk, candidate, Max, Xi, maxValue) private(i, j, t, l, X, Y, e,k) //Here for (i = 0; i < k; ++i) { maxValue[i]=0; Xi[i].first = 0, Xi[i].second = 0;// Here for (t = 0; t < candidate[i].size(); ++t) { l = candidate[i][t]; X = 0, Y = 0; while (X < e && Y < e) { if (Ak[i][X].w + Bk[l][Y].w > c) { ++Y; continue; } else if (Ak[i][X].p + Max[i][Y][t].first > maxValue[i]) { maxValue[i] = Ak[i][X].p + Max[i][Y][t].first; Xi[i].first = Ak[i][X].set; Xi[i].second = Bk[l][Max[i][Y][t].second].set; } ++X; } } } //Evaluate Maximum Profit from max(maxValue[i]) long long int X1 = Xi[0].first, X2 = Xi[0].second; for (i = 0; i < k; ++i) if (bestValue < maxValue[i]) { bestValue = maxValue[i]; X1 = Xi[i].first; X2 = Xi[i].second; } //The Subset ID from A, Subset ID from B which gives Maximum Profit (Best Value) cout << "\n\tSubsets : " << X1 << ", " << X2 << endl; cout << "\tBestvalue : " << bestValue << endl; } int main() { //Input Data int c = 0; vector< pair<long long int, long long int> > V; vector<double> wt_arr, p_arr; srand(time(0)); //Number of Items int num_items = 10; //Input Data for (int i = 0; i < num_items; ++i) { double wt = rand() % (long int)1e7; double p = rand() % (long int)1e7; c += wt; V.push_back(make_pair(wt, p)); wt_arr.push_back(wt); 
p_arr.push_back(p); } //Set capacity c /= 2; printf("\n\tCapacity = %d\n", c); //Computation & Timing auto start = chrono::steady_clock::now(); /* [Ak, Bk] -> Ak has k Blocks with N/k elements each [maxA, maxB] -> maxI has one Element for each Block of List I [candidate] -> candidate[i] is a Vector of Blocks of Bk, which are candidate solutions with Ak[i] [Max[i][j][t], L[j][t]] -> Pair of Maximum Profit & Respective Index with reference to candidate[i] */ int k = 4; //Number of Partitions long long int N = 1 <<( num_items >> 12); //Number of Subsets long long int e = N / k; //Number of Elements per Subset double bestValue = -1; //d vector<triple> A, B; vector< vector<triple> > Ak(k, vector<triple>()); vector< vector<triple> > Bk(k, vector<triple>()); vector<double> maxA(k), maxB(k); vector< vector<int> > candidate(k); vector< vector< vector< pair<double, long long int> > > > Max(k, vector< vector< pair<double, long long int> > >(e, vector< pair<double, long long int> >(2))); merge_lists(A, B, V); //Currently Serial Merging list_to_blocks(A, B, Ak, Bk, k); //Partition Lists to Blocks fsave_max_val(Ak, Bk, maxA, maxB); //Save prune(Ak, Bk, c, maxA, maxB, candidate, bestValue); ssave_max_val(Bk, Max, candidate, bestValue); //par_search(Ak, Bk, c, candidate, Max, bestValue); auto stop = chrono::steady_clock::now(); cout << "\n Computational Time (Parallel) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; //Time the Serial DP Approach start = chrono::steady_clock::now(); //dp_knapSack(c, &wt_arr[0], &p_arr[0], V.size()); stop = chrono::steady_clock::now(); cout << "\n Computational Time (DP Serial) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; cin.get(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <algorithm> #include <list> #include <vector> #include <iterator> #include <functional> #include <time.h> #include <chrono> #include <cstdlib> using namespace std; struct triple { long long int set; //Set denotes which Elements are in the Subset long int w; //Weight of the Triple //d long int p; //Profit of the Triple //d //Struct Constructor triple() : set(0), w(0.0), p(0.0) {} //Comparison Operator Overloadings bool operator< (const triple &t) const { return (w < t.w); } bool operator> (const triple &t) const { return (w > t.w); } }; void merge_lists(vector<triple> &A, vector<triple> &B, vector< pair<long long int, long long int> > &V) { vector<triple> T_p, Tcopy; triple t; long long int v1s = V.size() >> 1, v2s = V.size() - v1s; //Initialisation for A t.set = 0, t.w = t.p = 0; A.push_back(t); //Sort A in Non-Increasing Order for (long long int i = 0; i < v1s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)A.size(); ++j) { t.set = A[j].set + (1 << i); t.w = A[j].w + V[i].first; t.p = A[j].p + V[i].second; T_p.push_back(t); } //Merge A, T_p merge(A.begin(), A.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy)); A = Tcopy; } //Initialisation for B t.set = 0, t.w = t.p = 0; B.push_back(t); //Sort B in Non-Increasing Order for (long long int i = 0; i < v2s; ++i) { T_p.clear(); Tcopy.clear(); //Add Elements to Subset (Triple) ti //Add ti to T_p for (long long int j = 0; j < (long long int)B.size(); ++j) { t.set = B[j].set + (1 << i); t.w = B[j].w + V[i + v1s].first; t.p = B[j].p + V[i + v1s].second; T_p.push_back(t); } //Merge B, T_p merge(B.begin(), B.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy), greater<struct triple>()); B = Tcopy; } } void maxScan(vector<triple> &B, vector< pair<int, long int> > &maxB) { long int Bsize = B.size(); maxB[Bsize - 1].first = B[Bsize - 1].p; maxB[Bsize - 1].second = Bsize - 1; for (long 
int i = Bsize - 2; i >= 0; i--) { if (B[i].p>maxB[i + 1].first) { maxB[i].first = B[i].p; maxB[i].second = i; } else { maxB[i].first = maxB[i + 1].first; maxB[i].second = maxB[i + 1].second; } } } long int generate_sets(vector<triple> &A, vector<triple> &B, const int &c, vector< pair<int, long long int> > &maxB, long int N) { int bestValue = 0; pair<long long int, long long int> bestSet; long long int i = 0, j = 0; while (i < N && j < N) { if (A[i].w + B[j].w > c) { ++j; if (j == N) break; else continue; } if (A[i].p + maxB[j].first > bestValue) { bestValue = A[i].p + maxB[j].first; bestSet = make_pair(A[i].set, maxB[j].second); } ++i; } return bestValue; } void dp_knapSack(long long int W, double wt[], double val[], long long int n) { long long int i, w; vector< vector<double> > K(n + 1, vector<double>(W + 1)); // Build table K[][] in bottom up manner for (i = 0; i <= n; i++) { for (w = 0; w <= W; w++) { if (i == 0 || w == 0) K[i][w] = 0; else if (wt[i - 1] <= w) K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w]); else K[i][w] = K[i - 1][w]; } } cout << "\n\n\tBest_DP: " << K[n][W] << endl; } //Input : Sorted Lists -> (A, B) //Output : Partitioned Sorted Lists -> (Ak, Bk) with N/k elements each void list_to_blocks(vector<triple> &A, vector<triple> &B, vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, int k) { long long int e = A.size() / k, i; vector<triple>::iterator Ait, Bit; Ait = A.begin(), Bit = B.begin(); //#pragma omp parallel for shared(A, B, Ak, Bk, e, k) private(i, Ait, Bit) for (i = 0; i < k; ++i) { Ait = A.begin() + i * e; Bit = B.begin() + i * e; copy(Ait, Ait + e, back_inserter(Ak[i])); copy(Bit, Bit + e, back_inserter(Bk[i])); } } //Input : Partitioned Sorted Lists -> (Ak, Bk) //Output : Maximum Profit of Blocks -> (maxAi, maxBi) void fsave_max_val(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, vector<double> &maxA, vector<double> &maxB) { //Needs to be dynamic if not equally partitioned (if N/k not an int) long 
long int e = maxA.size(), i, j; double Amax, Bmax; #pragma omp parallel for shared(Ak, Bk, maxA, maxB) private(e, i, j, Amax, Bmax) //Here for (i = 0; i < e; ++i) { Amax = Ak[i][0].p; Bmax = Bk[i][0].p; //Perform Parallel Max Search for Better Result for (j = 1; j < Ak[i].size(); ++j) { Amax = (Amax < Ak[i][j].p) ? Ak[i][j].p : Amax; Bmax = (Bmax < Bk[i][j].p) ? Bk[i][j].p : Bmax; } maxA[i] = Amax; maxB[i] = Bmax; } } //Input : Ak, Bk, maxAi, maxBi //Output : Blocks that are within Capacity c void prune(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector<double> &maxA, vector<double> &maxB, vector< vector<int> > &candidate, double &bestValue) { int Z, Y; int i, j, k = Ak.size(), e = Ak[0].size(); vector<int> maxValue(k); #pragma omp parallel for reduction(max:bestValue) shared(Ak, Bk, maxA, maxB, maxValue, candidate) private(i, j, Z, Y,c, e) for (i = 0; i < k; ++i) { maxValue[i] = 0; for (j = 0; j < k; ++j) //Here - will lead to CR { Z = Ak[i][0].w + Bk[j][e - 1].w; Y = Ak[i][e - 1].w + Bk[j][0].w; if (Y <= c) { if (maxA[i] + maxB[j] > maxValue[i]) maxValue[i] = maxA[i] + maxB[j]; if (bestValue<maxValue[i]) //Here bestValue = maxValue[i]; } else if (Z <= c && Y > c) candidate[i].push_back(j); // here make copy of block bk[j] } } } //Input : Candidate Block Pairs -> candidate //Output : (Max[i][j][t], L[j][t]) with reference to candidate[i] void ssave_max_val(vector< vector<triple> > &Bk, vector< vector< vector< pair<double, long long int> > > > &Max, vector< vector<int> > &candidate, double &bestValue) { int i, t, l, k = Bk.size(); int j, e = Bk[0].size(); #pragma omp parallel for shared(Bk,candidate,Max) private(i,j,t,l,k,e) //Here for (i = 0; i < k; ++i) { for (t = 0; t < candidate[i].size(); ++t) { //l is the Index of The Block Partition B of the Candidate Block Pair (Bk[l]) l = candidate[i][t]; //Initialise Last Element and Index Max[i][e - 1][t].first = Bk[l][e - 1].p; Max[i][e - 1][t].second = e - 1; //Reverse Inclusive Max-Scan for 
(j = e - 2; j > -1; --j) { if (Bk[l][j].p > Max[i][j + 1][t].first) { Max[i][j][t].first = Bk[l][j].p; Max[i][j][t].second = j; } else { Max[i][j][t].first = Max[i][j + 1][t].first; Max[i][j][t].second = Max[i][j + 1][t].second; } } } } } //Input : candidate, Max //Output : Best Value void par_search(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector< vector<int> > &candidate, vector< vector< vector< pair<double, long long int> > > > &Max, double &bestValue) { int i, j, t, l, k = Ak.size(); long long int e = Ak[0].size(), X, Y; vector<double> maxValue(k); vector< pair<long long int, long long int> > Xi(k); //Xi -> (Index ID of Subset A, Index ID of Subset B) #pragma omp parallel for shared(Ak, Bk, candidate, Max, Xi, maxValue) private(i, j, t, l, X, Y, e,k) //Here for (i = 0; i < k; ++i) { maxValue[i]=0; Xi[i].first = 0, Xi[i].second = 0;// Here for (t = 0; t < candidate[i].size(); ++t) { l = candidate[i][t]; X = 0, Y = 0; while (X < e && Y < e) { if (Ak[i][X].w + Bk[l][Y].w > c) { ++Y; continue; } else if (Ak[i][X].p + Max[i][Y][t].first > maxValue[i]) { maxValue[i] = Ak[i][X].p + Max[i][Y][t].first; Xi[i].first = Ak[i][X].set; Xi[i].second = Bk[l][Max[i][Y][t].second].set; } ++X; } } } //Evaluate Maximum Profit from max(maxValue[i]) long long int X1 = Xi[0].first, X2 = Xi[0].second; for (i = 0; i < k; ++i) if (bestValue < maxValue[i]) { bestValue = maxValue[i]; X1 = Xi[i].first; X2 = Xi[i].second; } //The Subset ID from A, Subset ID from B which gives Maximum Profit (Best Value) cout << "\n\tSubsets : " << X1 << ", " << X2 << endl; cout << "\tBestvalue : " << bestValue << endl; } int main() { //Input Data int c = 0; vector< pair<long long int, long long int> > V; vector<double> wt_arr, p_arr; srand(time(0)); //Number of Items int num_items = 10; //Input Data for (int i = 0; i < num_items; ++i) { double wt = rand() % (long int)1e7; double p = rand() % (long int)1e7; c += wt; V.push_back(make_pair(wt, p)); wt_arr.push_back(wt); 
p_arr.push_back(p); } //Set capacity c /= 2; printf("\n\tCapacity = %d\n", c); //Computation & Timing auto start = chrono::steady_clock::now(); /* [Ak, Bk] -> Ak has k Blocks with N/k elements each [maxA, maxB] -> maxI has one Element for each Block of List I [candidate] -> candidate[i] is a Vector of Blocks of Bk, which are candidate solutions with Ak[i] [Max[i][j][t], L[j][t]] -> Pair of Maximum Profit & Respective Index with reference to candidate[i] */ int k = 4; //Number of Partitions long long int N = 1 <<( num_items >> 12); //Number of Subsets long long int e = N / k; //Number of Elements per Subset double bestValue = -1; //d vector<triple> A, B; vector< vector<triple> > Ak(k, vector<triple>()); vector< vector<triple> > Bk(k, vector<triple>()); vector<double> maxA(k), maxB(k); vector< vector<int> > candidate(k); vector< vector< vector< pair<double, long long int> > > > Max(k, vector< vector< pair<double, long long int> > >(e, vector< pair<double, long long int> >(2))); merge_lists(A, B, V); //Currently Serial Merging list_to_blocks(A, B, Ak, Bk, k); //Partition Lists to Blocks fsave_max_val(Ak, Bk, maxA, maxB); //Save prune(Ak, Bk, c, maxA, maxB, candidate, bestValue); ssave_max_val(Bk, Max, candidate, bestValue); //par_search(Ak, Bk, c, candidate, Max, bestValue); auto stop = chrono::steady_clock::now(); cout << "\n Computational Time (Parallel) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; //Time the Serial DP Approach start = chrono::steady_clock::now(); //dp_knapSack(c, &wt_arr[0], &p_arr[0], V.size()); stop = chrono::steady_clock::now(); cout << "\n Computational Time (DP Serial) : "; cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0; cout << " ms" << endl; cin.get(); return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#pragma warning (disable : 4267) #pragma warning (disable : 4244) #include <thrust/random.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> #include <thrust/random/normal_distribution.h> #include <iostream> #include <iomanip> #include <cmath> #include "Example_MC_BS.cuh" __host__ __device__ unsigned int hashBS(unsigned int a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } struct estimate_BS : public thrust::unary_function<unsigned int,float> { __device__ float operator()(unsigned int thread_id) { float sum = 0; unsigned int N = 100000; // samples per thread unsigned int seed = thread_id; // seed a random number generator thrust::default_random_engine rng(seed); // create a mapping from random numbers to N(0,1) thrust::random::normal_distribution<float> ndist(0.0f, 1.0f); float S0 = 20.0f; float sig = 0.28f; float r = 0.045f; float K = 21.0f; float T = 0.5f; float sqrtT = sqrtf(T); float sig2 = sig*sig; // take N samples in a quarter circle for(unsigned int i = 0; i < N; ++i) { float W = ndist(rng); float ST = S0 * expf((r - 0.5f*sig2)*T + sig*sqrtT*W); float ST_at = S0 * expf((r - 0.5f*sig2)*T - sig*sqrtT*W); sum += (((ST-K > 0.0f)? ST-K:0.0f) + ((ST_at-K > 0.0f)? ST_at-K:0.0f))/2.0f; } // discount back sum *= expf(-r*T); // divide by N return sum / N; } }; void exmpl_thrust_MC_BS() { // use 30K independent seeds int M = 50000; float estimate = thrust::transform_reduce(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(M), estimate_BS(), 0.0f, thrust::plus<float>()); estimate /= M; std::cout << std::setprecision(10); std::cout << "Option price is approximately " << estimate << std::endl; cudaDeviceReset(); };
#include <hip/hip_runtime.h> void exmpl_thrust_MC_BS(); void exmpl_thrust_MC_BS2(); void exmpl_thrust_MC_BS3();
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> void exmpl_thrust_MC_BS(); void exmpl_thrust_MC_BS2(); void exmpl_thrust_MC_BS3();
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> void exmpl_thrust_MC_BS(); void exmpl_thrust_MC_BS2(); void exmpl_thrust_MC_BS3();
.text .file "Example_MC_BS.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void ThresholdKernel(float min, float max, int mode, float* input, float* output, int size, int count) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; __shared__ float delta; if(id < size) { if (threadIdx.x == 0) delta = (max - min)/count; __syncthreads(); for (int i = 0; i < count; i++) output[i * size + id] = 0; int idx; float fidx = ((input[id] - min) / delta); switch (mode) { case 0: // consider values outside of the interval <min,max> idx = (int)floor(fmaxf(0, fminf(fidx, count - 1))); break; case 1: // strict threshold if (fidx < 0.0f || fidx >= count) { return; } idx = (int)fidx; break; } output[idx * size + id] = 1.0f; } }
code for sm_80 Function : _Z15ThresholdKernelffiPfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0040*/ IMAD R3, R3, c[0x0][0xc], R2 ; /* 0x0000030003037a24 */ /* 0x001fc800078e0202 */ /*0050*/ IMAD R2, R3, c[0x0][0x0], R0 ; /* 0x0000000003027a24 */ /* 0x002fca00078e0200 */ /*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x180], PT ; /* 0x0000600002007a0c */ /* 0x000fda0003f06270 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0090*/ BSSY B0, 0x1c0 ; /* 0x0000012000007945 */ /* 0x000fd80003800000 */ /*00a0*/ @P0 BRA 0x1b0 ; /* 0x0000010000000947 */ /* 0x000fea0003800000 */ /*00b0*/ I2F R5, c[0x0][0x184] ; /* 0x0000610000057b06 */ /* 0x000e220000201400 */ /*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff047624 */ /* 0x000fc800078e00ff */ /*00d0*/ FADD R4, R4, -c[0x0][0x160] ; /* 0x8000580004047621 */ /* 0x000fc60000000000 */ /*00e0*/ MUFU.RCP R6, R5 ; /* 0x0000000500067308 */ /* 0x001e300000001000 */ /*00f0*/ FCHK P0, R4, R5 ; /* 0x0000000504007302 */ /* 0x000e620000000000 */ /*0100*/ FFMA R7, -R5, R6, 1 ; /* 0x3f80000005077423 */ /* 0x001fc80000000106 */ /*0110*/ FFMA R7, R6, R7, R6 ; /* 0x0000000706077223 */ /* 0x000fc80000000006 */ /*0120*/ FFMA R6, R4, R7, RZ ; /* 0x0000000704067223 */ /* 0x000fc800000000ff */ /*0130*/ FFMA R8, -R5, R6, R4 ; /* 0x0000000605087223 */ /* 0x000fc80000000104 */ /*0140*/ FFMA R6, R7, R8, R6 ; /* 0x0000000807067223 */ /* 0x000fe20000000006 */ /*0150*/ @!P0 BRA 0x1a0 ; /* 0x0000004000008947 */ /* 
0x002fea0003800000 */ /*0160*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0004 */ /*0170*/ MOV R4, 0x1a0 ; /* 0x000001a000047802 */ /* 0x000fe20000000f00 */ /*0180*/ IMAD.MOV.U32 R9, RZ, RZ, R5 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0005 */ /*0190*/ CALL.REL.NOINC 0xac0 ; /* 0x0000092000007944 */ /* 0x000fea0003c00000 */ /*01a0*/ STS [RZ], R6 ; /* 0x00000006ff007388 */ /* 0x0001e40000000800 */ /*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*01c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff047624 */ /* 0x000fe200078e00ff */ /*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe80000010000 */ /*01e0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */ /* 0x000fe40003f06270 */ /*01f0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fd60000000a00 */ /*0200*/ @!P0 BRA 0x820 ; /* 0x0000061000008947 */ /* 0x000fea0003800000 */ /*0210*/ IADD3 R5, R4.reuse, -0x1, RZ ; /* 0xffffffff04057810 */ /* 0x040fe40007ffe0ff */ /*0220*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fe400078ec0ff */ /*0230*/ ISETP.GE.U32.AND P0, PT, R5, 0x3, PT ; /* 0x000000030500780c */ /* 0x000fe20003f06070 */ /*0240*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */ /* 0x000fd800078e00ff */ /*0250*/ @!P0 BRA 0x770 ; /* 0x0000051000008947 */ /* 0x000fea0003800000 */ /*0260*/ IADD3 R28, -R4, c[0x0][0x184], RZ ; /* 0x00006100041c7a10 */ /* 0x000fe20007ffe1ff */ /*0270*/ IMAD.MOV.U32 R29, RZ, RZ, 0x4 ; /* 0x00000004ff1d7424 */ /* 0x000fe200078e00ff */ /*0280*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */ /* 0x000fe400000001ff */ /*0290*/ ISETP.GT.AND P0, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */ /* 0x000fe20003f04270 */ /*02a0*/ IMAD.WIDE R16, R2, R29, c[0x0][0x178] ; /* 0x00005e0002107625 */ /* 0x000fd800078e021d */ /*02b0*/ @!P0 BRA 0x6b0 ; /* 0x000003f000008947 */ /* 0x000fea0003800000 */ /*02c0*/ ISETP.GT.AND P1, PT, R28, 0xc, PT ; /* 
0x0000000c1c00780c */ /* 0x000fe40003f24270 */ /*02d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*02e0*/ @!P1 BRA 0x540 ; /* 0x0000025000009947 */ /* 0x000fea0003800000 */ /*02f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0300*/ IMAD.WIDE R18, R29, c[0x0][0x180], R16 ; /* 0x000060001d127a25 */ /* 0x004fe200078e0210 */ /*0310*/ STG.E [R16.64], RZ ; /* 0x000000ff10007986 */ /* 0x0003e2000c101906 */ /*0320*/ IADD3 R28, R28, -0x10, RZ ; /* 0xfffffff01c1c7810 */ /* 0x000fe40007ffe0ff */ /*0330*/ IADD3 R5, R5, 0x10, RZ ; /* 0x0000001005057810 */ /* 0x000fe20007ffe0ff */ /*0340*/ STG.E [R18.64], RZ ; /* 0x000000ff12007986 */ /* 0x0005e2000c101906 */ /*0350*/ IMAD.WIDE R20, R29, c[0x0][0x180], R18 ; /* 0x000060001d147a25 */ /* 0x000fe200078e0212 */ /*0360*/ ISETP.GT.AND P1, PT, R28, 0xc, PT ; /* 0x0000000c1c00780c */ /* 0x000fc80003f24270 */ /*0370*/ STG.E [R20.64], RZ ; /* 0x000000ff14007986 */ /* 0x0007e2000c101906 */ /*0380*/ IMAD.WIDE R24, R29, c[0x0][0x180], R20 ; /* 0x000060001d187a25 */ /* 0x000fca00078e0214 */ /*0390*/ STG.E [R24.64], RZ ; /* 0x000000ff18007986 */ /* 0x000fe2000c101906 */ /*03a0*/ IMAD.WIDE R6, R29, c[0x0][0x180], R24 ; /* 0x000060001d067a25 */ /* 0x001fca00078e0218 */ /*03b0*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */ /* 0x0001e2000c101906 */ /*03c0*/ IMAD.WIDE R8, R29, c[0x0][0x180], R6 ; /* 0x000060001d087a25 */ /* 0x000fca00078e0206 */ /*03d0*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */ /* 0x000fe2000c101906 */ /*03e0*/ IMAD.WIDE R10, R29, c[0x0][0x180], R8 ; /* 0x000060001d0a7a25 */ /* 0x000fca00078e0208 */ /*03f0*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */ /* 0x000fe2000c101906 */ /*0400*/ IMAD.WIDE R12, R29, c[0x0][0x180], R10 ; /* 0x000060001d0c7a25 */ /* 0x000fca00078e020a */ /*0410*/ STG.E [R12.64], RZ ; /* 0x000000ff0c007986 */ /* 0x000fe2000c101906 */ /*0420*/ IMAD.WIDE R14, R29, c[0x0][0x180], R12 ; /* 0x000060001d0e7a25 */ 
/* 0x000fca00078e020c */ /*0430*/ STG.E [R14.64], RZ ; /* 0x000000ff0e007986 */ /* 0x000fe2000c101906 */ /*0440*/ IMAD.WIDE R16, R29, c[0x0][0x180], R14 ; /* 0x000060001d107a25 */ /* 0x002fca00078e020e */ /*0450*/ STG.E [R16.64], RZ ; /* 0x000000ff10007986 */ /* 0x0003e2000c101906 */ /*0460*/ IMAD.WIDE R18, R29, c[0x0][0x180], R16 ; /* 0x000060001d127a25 */ /* 0x004fca00078e0210 */ /*0470*/ STG.E [R18.64], RZ ; /* 0x000000ff12007986 */ /* 0x0005e2000c101906 */ /*0480*/ IMAD.WIDE R20, R29, c[0x0][0x180], R18 ; /* 0x000060001d147a25 */ /* 0x008fca00078e0212 */ /*0490*/ STG.E [R20.64], RZ ; /* 0x000000ff14007986 */ /* 0x0005e2000c101906 */ /*04a0*/ IMAD.WIDE R22, R29, c[0x0][0x180], R20 ; /* 0x000060001d167a25 */ /* 0x000fca00078e0214 */ /*04b0*/ STG.E [R22.64], RZ ; /* 0x000000ff16007986 */ /* 0x0005e2000c101906 */ /*04c0*/ IMAD.WIDE R26, R29, c[0x0][0x180], R22 ; /* 0x000060001d1a7a25 */ /* 0x000fca00078e0216 */ /*04d0*/ STG.E [R26.64], RZ ; /* 0x000000ff1a007986 */ /* 0x0005e2000c101906 */ /*04e0*/ IMAD.WIDE R6, R29, c[0x0][0x180], R26 ; /* 0x000060001d067a25 */ /* 0x001fca00078e021a */ /*04f0*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */ /* 0x0005e2000c101906 */ /*0500*/ IMAD.WIDE R24, R29, c[0x0][0x180], R6 ; /* 0x000060001d187a25 */ /* 0x000fca00078e0206 */ /*0510*/ STG.E [R24.64], RZ ; /* 0x000000ff18007986 */ /* 0x0005e2000c101906 */ /*0520*/ IMAD.WIDE R16, R29, c[0x0][0x180], R24 ; /* 0x000060001d107a25 */ /* 0x002fe200078e0218 */ /*0530*/ @P1 BRA 0x300 ; /* 0xfffffdc000001947 */ /* 0x000fea000383ffff */ /*0540*/ ISETP.GT.AND P1, PT, R28, 0x4, PT ; /* 0x000000041c00780c */ /* 0x000fda0003f24270 */ /*0550*/ @!P1 BRA 0x690 ; /* 0x0000013000009947 */ /* 0x000fea0003800000 */ /*0560*/ IMAD.WIDE R14, R29, c[0x0][0x180], R16 ; /* 0x000060001d0e7a25 */ /* 0x000fe200078e0210 */ /*0570*/ STG.E [R16.64], RZ ; /* 0x000000ff10007986 */ /* 0x0003e2000c101906 */ /*0580*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0590*/ 
IADD3 R5, R5, 0x8, RZ ; /* 0x0000000805057810 */ /* 0x000fe20007ffe0ff */ /*05a0*/ STG.E [R14.64], RZ ; /* 0x000000ff0e007986 */ /* 0x0007e2000c101906 */ /*05b0*/ IMAD.WIDE R18, R29, c[0x0][0x180], R14 ; /* 0x000060001d127a25 */ /* 0x004fe200078e020e */ /*05c0*/ IADD3 R28, R28, -0x8, RZ ; /* 0xfffffff81c1c7810 */ /* 0x000fc80007ffe0ff */ /*05d0*/ STG.E [R18.64], RZ ; /* 0x000000ff12007986 */ /* 0x0007e2000c101906 */ /*05e0*/ IMAD.WIDE R12, R29, c[0x0][0x180], R18 ; /* 0x000060001d0c7a25 */ /* 0x000fca00078e0212 */ /*05f0*/ STG.E [R12.64], RZ ; /* 0x000000ff0c007986 */ /* 0x0007e2000c101906 */ /*0600*/ IMAD.WIDE R10, R29, c[0x0][0x180], R12 ; /* 0x000060001d0a7a25 */ /* 0x000fca00078e020c */ /*0610*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */ /* 0x0007e2000c101906 */ /*0620*/ IMAD.WIDE R8, R29, c[0x0][0x180], R10 ; /* 0x000060001d087a25 */ /* 0x000fca00078e020a */ /*0630*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */ /* 0x0007e2000c101906 */ /*0640*/ IMAD.WIDE R6, R29, c[0x0][0x180], R8 ; /* 0x000060001d067a25 */ /* 0x001fca00078e0208 */ /*0650*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */ /* 0x0007e2000c101906 */ /*0660*/ IMAD.WIDE R20, R29, c[0x0][0x180], R6 ; /* 0x000060001d147a25 */ /* 0x000fca00078e0206 */ /*0670*/ STG.E [R20.64], RZ ; /* 0x000000ff14007986 */ /* 0x0007e2000c101906 */ /*0680*/ IMAD.WIDE R16, R29, c[0x0][0x180], R20 ; /* 0x000060001d107a25 */ /* 0x002fc800078e0214 */ /*0690*/ ISETP.NE.OR P0, PT, R28, RZ, P0 ; /* 0x000000ff1c00720c */ /* 0x000fda0000705670 */ /*06a0*/ @!P0 BRA 0x770 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*06b0*/ IMAD.WIDE R6, R29, c[0x0][0x180], R16 ; /* 0x000060001d067a25 */ /* 0x00dfe200078e0210 */ /*06c0*/ STG.E [R16.64], RZ ; /* 0x000000ff10007986 */ /* 0x0001e2000c101906 */ /*06d0*/ IADD3 R28, R28, -0x4, RZ ; /* 0xfffffffc1c1c7810 */ /* 0x000fe40007ffe0ff */ /*06e0*/ IADD3 R5, R5, 0x4, RZ ; /* 0x0000000405057810 */ /* 0x000fe20007ffe0ff */ /*06f0*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */ /* 
0x0003e2000c101906 */ /*0700*/ IMAD.WIDE R8, R29, c[0x0][0x180], R6 ; /* 0x000060001d087a25 */ /* 0x000fe200078e0206 */ /*0710*/ ISETP.NE.AND P0, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */ /* 0x000fc80003f05270 */ /*0720*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */ /* 0x0003e2000c101906 */ /*0730*/ IMAD.WIDE R10, R29, c[0x0][0x180], R8 ; /* 0x000060001d0a7a25 */ /* 0x000fca00078e0208 */ /*0740*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */ /* 0x0003e2000c101906 */ /*0750*/ IMAD.WIDE R16, R29, c[0x0][0x180], R10 ; /* 0x000060001d107a25 */ /* 0x001fe400078e020a */ /*0760*/ @P0 BRA 0x6b0 ; /* 0xffffff4000000947 */ /* 0x002fea000383ffff */ /*0770*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f05270 */ /*0780*/ @!P0 BRA 0x820 ; /* 0x0000009000008947 */ /* 0x000fea0003800000 */ /*0790*/ IMAD R0, R5, c[0x0][0x180], R0 ; /* 0x0000600005007a24 */ /* 0x000fe400078e0200 */ /*07a0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe400078e00ff */ /*07b0*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */ /* 0x000fc800078e0200 */ /*07c0*/ IMAD.WIDE R6, R0, R5, c[0x0][0x178] ; /* 0x00005e0000067625 */ /* 0x00dfc800078e0205 */ /*07d0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe20007ffe0ff */ /*07e0*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */ /* 0x0001e6000c101906 */ /*07f0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0800*/ IMAD.WIDE R6, R5, c[0x0][0x180], R6 ; /* 0x0000600005067a25 */ /* 0x001fd800078e0206 */ /*0810*/ @P0 BRA 0x7d0 ; /* 0xffffffb000000947 */ /* 0x000fea000383ffff */ /*0820*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0830*/ LDS R3, [RZ] ; /* 0x00000000ff037984 */ /* 0x000e660000000800 */ /*0840*/ IMAD.WIDE R4, R2, R5, c[0x0][0x170] ; /* 0x00005c0002047625 */ /* 0x000fcc00078e0205 */ /*0850*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */ /* 0x000f22000c1e1900 */ /*0860*/ BSSY B0, 0x950 ; /* 
0x000000e000007945 */ /* 0x000fe20003800000 */ /*0870*/ MUFU.RCP R6, R3 ; /* 0x0000000300067308 */ /* 0x00fe240000001000 */ /*0880*/ FFMA R7, -R3, R6, 1 ; /* 0x3f80000003077423 */ /* 0x001fc80000000106 */ /*0890*/ FFMA R7, R6, R7, R6 ; /* 0x0000000706077223 */ /* 0x000fe40000000006 */ /*08a0*/ FADD R0, R4, -c[0x0][0x160] ; /* 0x8000580004007621 */ /* 0x010fc80000000000 */ /*08b0*/ FCHK P0, R0, R3 ; /* 0x0000000300007302 */ /* 0x000e220000000000 */ /*08c0*/ FFMA R6, R0, R7, RZ ; /* 0x0000000700067223 */ /* 0x000fc800000000ff */ /*08d0*/ FFMA R8, -R3, R6, R0 ; /* 0x0000000603087223 */ /* 0x000fc80000000100 */ /*08e0*/ FFMA R6, R7, R8, R6 ; /* 0x0000000807067223 */ /* 0x000fe20000000006 */ /*08f0*/ @!P0 BRA 0x940 ; /* 0x0000004000008947 */ /* 0x001fea0003800000 */ /*0900*/ IMAD.MOV.U32 R8, RZ, RZ, R0 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0000 */ /*0910*/ MOV R4, 0x940 ; /* 0x0000094000047802 */ /* 0x000fe20000000f00 */ /*0920*/ IMAD.MOV.U32 R9, RZ, RZ, R3 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0003 */ /*0930*/ CALL.REL.NOINC 0xac0 ; /* 0x0000018000007944 */ /* 0x000fea0003c00000 */ /*0940*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0950*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fda0003f05270 */ /*0960*/ @!P0 BRA 0xa00 ; /* 0x0000009000008947 */ /* 0x000fea0003800000 */ /*0970*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff007624 */ /* 0x000fca00078e00ff */ /*0980*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */ /* 0x000fda0003f05270 */ /*0990*/ @P0 BRA 0xa60 ; /* 0x000000c000000947 */ /* 0x000fea0003800000 */ /*09a0*/ I2F R3, c[0x0][0x184] ; /* 0x0000610000037b06 */ /* 0x000e240000201400 */ /*09b0*/ FSETP.GE.AND P0, PT, R6, R3, PT ; /* 0x000000030600720b */ /* 0x001fc80003f06000 */ /*09c0*/ FSETP.LT.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720b */ /* 0x000fda0000701400 */ /*09d0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*09e0*/ F2I.TRUNC.NTZ R3, R6 ; /* 
0x0000000600037305 */ /* 0x000062000020f100 */ /*09f0*/ BRA 0xa60 ; /* 0x0000006000007947 */ /* 0x000fea0003800000 */ /*0a00*/ ULDC UR4, c[0x0][0x184] ; /* 0x0000610000047ab9 */ /* 0x000fe40000000800 */ /*0a10*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */ /* 0x000fd2000fffe03f */ /*0a20*/ I2F R3, UR4 ; /* 0x0000000400037d06 */ /* 0x000e240008201400 */ /*0a30*/ FMNMX R3, R3, R6, PT ; /* 0x0000000603037209 */ /* 0x001fc80003800000 */ /*0a40*/ FMNMX R3, RZ, R3, !PT ; /* 0x00000003ff037209 */ /* 0x000fcc0007800000 */ /*0a50*/ F2I.FLOOR.NTZ R3, R3 ; /* 0x0000000300037305 */ /* 0x000e240000207100 */ /*0a60*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */ /* 0x000fe200000001ff */ /*0a70*/ IMAD R3, R3, c[0x0][0x180], R2 ; /* 0x0000600003037a24 */ /* 0x003fe400078e0202 */ /*0a80*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff057424 */ /* 0x000fce00078e00ff */ /*0a90*/ IMAD.WIDE R2, R3, R0, c[0x0][0x178] ; /* 0x00005e0003027625 */ /* 0x000fca00078e0200 */ /*0aa0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101906 */ /*0ab0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0ac0*/ SHF.R.U32.HI R6, RZ, 0x17, R9.reuse ; /* 0x00000017ff067819 */ /* 0x100fe20000011609 */ /*0ad0*/ BSSY B1, 0x1120 ; /* 0x0000064000017945 */ /* 0x000fe20003800000 */ /*0ae0*/ SHF.R.U32.HI R5, RZ, 0x17, R8.reuse ; /* 0x00000017ff057819 */ /* 0x100fe40000011608 */ /*0af0*/ LOP3.LUT R13, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff060d7812 */ /* 0x000fe200078ec0ff */ /*0b00*/ IMAD.MOV.U32 R6, RZ, RZ, R9 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0009 */ /*0b10*/ LOP3.LUT R11, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff050b7812 */ /* 0x000fe200078ec0ff */ /*0b20*/ IMAD.MOV.U32 R5, RZ, RZ, R8 ; /* 0x000000ffff057224 */ /* 0x000fe200078e0008 */ /*0b30*/ IADD3 R12, R13, -0x1, RZ ; /* 0xffffffff0d0c7810 */ /* 0x000fe40007ffe0ff */ /*0b40*/ IADD3 R10, R11, -0x1, RZ ; /* 0xffffffff0b0a7810 */ /* 0x000fc40007ffe0ff */ /*0b50*/ ISETP.GT.U32.AND P0, 
PT, R12, 0xfd, PT ; /* 0x000000fd0c00780c */ /* 0x000fc80003f04070 */ /*0b60*/ ISETP.GT.U32.OR P0, PT, R10, 0xfd, P0 ; /* 0x000000fd0a00780c */ /* 0x000fda0000704470 */ /*0b70*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff078224 */ /* 0x000fe200078e00ff */ /*0b80*/ @!P0 BRA 0xd00 ; /* 0x0000017000008947 */ /* 0x000fea0003800000 */ /*0b90*/ FSETP.GTU.FTZ.AND P0, PT, |R8|, +INF , PT ; /* 0x7f8000000800780b */ /* 0x000fe40003f1c200 */ /*0ba0*/ FSETP.GTU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */ /* 0x000fc80003f3c200 */ /*0bb0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703570 */ /*0bc0*/ @P0 BRA 0x1100 ; /* 0x0000053000000947 */ /* 0x000fea0003800000 */ /*0bd0*/ LOP3.LUT P0, RZ, R6, 0x7fffffff, R5, 0xc8, !PT ; /* 0x7fffffff06ff7812 */ /* 0x000fda000780c805 */ /*0be0*/ @!P0 BRA 0x10e0 ; /* 0x000004f000008947 */ /* 0x000fea0003800000 */ /*0bf0*/ FSETP.NEU.FTZ.AND P2, PT, |R8|.reuse, +INF , PT ; /* 0x7f8000000800780b */ /* 0x040fe40003f5d200 */ /*0c00*/ FSETP.NEU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */ /* 0x000fe40003f3d200 */ /*0c10*/ FSETP.NEU.FTZ.AND P0, PT, |R8|, +INF , PT ; /* 0x7f8000000800780b */ /* 0x000fd60003f1d200 */ /*0c20*/ @!P1 BRA !P2, 0x10e0 ; /* 0x000004b000009947 */ /* 0x000fea0005000000 */ /*0c30*/ LOP3.LUT P2, RZ, R5, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff05ff7812 */ /* 0x000fc8000784c0ff */ /*0c40*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000f24572 */ /*0c50*/ @P1 BRA 0x10c0 ; /* 0x0000046000001947 */ /* 0x000fea0003800000 */ /*0c60*/ LOP3.LUT P1, RZ, R6, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff06ff7812 */ /* 0x000fc8000782c0ff */ /*0c70*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000702572 */ /*0c80*/ @P0 BRA 0x1090 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*0c90*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */ /* 0x000fe40003f06270 */ /*0ca0*/ ISETP.GE.AND P1, PT, 
R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fd60003f26270 */ /*0cb0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff070224 */ /* 0x000fe200078e00ff */ /*0cc0*/ @!P0 MOV R7, 0xffffffc0 ; /* 0xffffffc000078802 */ /* 0x000fe20000000f00 */ /*0cd0*/ @!P0 FFMA R5, R8, 1.84467440737095516160e+19, RZ ; /* 0x5f80000008058823 */ /* 0x000fe400000000ff */ /*0ce0*/ @!P1 FFMA R6, R9, 1.84467440737095516160e+19, RZ ; /* 0x5f80000009069823 */ /* 0x000fe200000000ff */ /*0cf0*/ @!P1 IADD3 R7, R7, 0x40, RZ ; /* 0x0000004007079810 */ /* 0x000fe40007ffe0ff */ /*0d00*/ LEA R9, R13, 0xc0800000, 0x17 ; /* 0xc08000000d097811 */ /* 0x000fe200078eb8ff */ /*0d10*/ BSSY B2, 0x1080 ; /* 0x0000036000027945 */ /* 0x000fe80003800000 */ /*0d20*/ IMAD.IADD R9, R6, 0x1, -R9 ; /* 0x0000000106097824 */ /* 0x000fe200078e0a09 */ /*0d30*/ IADD3 R6, R11, -0x7f, RZ ; /* 0xffffff810b067810 */ /* 0x000fc60007ffe0ff */ /*0d40*/ MUFU.RCP R8, R9 ; /* 0x0000000900087308 */ /* 0x000e220000001000 */ /*0d50*/ FADD.FTZ R10, -R9, -RZ ; /* 0x800000ff090a7221 */ /* 0x000fe40000010100 */ /*0d60*/ IMAD R5, R6, -0x800000, R5 ; /* 0xff80000006057824 */ /* 0x000fe400078e0205 */ /*0d70*/ FFMA R11, R8, R10, 1 ; /* 0x3f800000080b7423 */ /* 0x001fc8000000000a */ /*0d80*/ FFMA R12, R8, R11, R8 ; /* 0x0000000b080c7223 */ /* 0x000fc80000000008 */ /*0d90*/ FFMA R8, R5, R12, RZ ; /* 0x0000000c05087223 */ /* 0x000fc800000000ff */ /*0da0*/ FFMA R11, R10, R8, R5 ; /* 0x000000080a0b7223 */ /* 0x000fc80000000005 */ /*0db0*/ FFMA R11, R12, R11, R8 ; /* 0x0000000b0c0b7223 */ /* 0x000fe20000000008 */ /*0dc0*/ IADD3 R8, R6, 0x7f, -R13 ; /* 0x0000007f06087810 */ /* 0x000fc60007ffe80d */ /*0dd0*/ FFMA R10, R10, R11, R5 ; /* 0x0000000b0a0a7223 */ /* 0x000fe40000000005 */ /*0de0*/ IMAD.IADD R8, R8, 0x1, R7 ; /* 0x0000000108087824 */ /* 0x000fe400078e0207 */ /*0df0*/ FFMA R5, R12, R10, R11 ; /* 0x0000000a0c057223 */ /* 0x000fca000000000b */ /*0e00*/ SHF.R.U32.HI R6, RZ, 0x17, R5 ; /* 0x00000017ff067819 */ /* 0x000fc80000011605 */ /*0e10*/ 
LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */ /* 0x000fca00078ec0ff */ /*0e20*/ IMAD.IADD R13, R6, 0x1, R8 ; /* 0x00000001060d7824 */ /* 0x000fca00078e0208 */ /*0e30*/ IADD3 R6, R13, -0x1, RZ ; /* 0xffffffff0d067810 */ /* 0x000fc80007ffe0ff */ /*0e40*/ ISETP.GE.U32.AND P0, PT, R6, 0xfe, PT ; /* 0x000000fe0600780c */ /* 0x000fda0003f06070 */ /*0e50*/ @!P0 BRA 0x1060 ; /* 0x0000020000008947 */ /* 0x000fea0003800000 */ /*0e60*/ ISETP.GT.AND P0, PT, R13, 0xfe, PT ; /* 0x000000fe0d00780c */ /* 0x000fda0003f04270 */ /*0e70*/ @P0 BRA 0x1030 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*0e80*/ ISETP.GE.AND P0, PT, R13, 0x1, PT ; /* 0x000000010d00780c */ /* 0x000fda0003f06270 */ /*0e90*/ @P0 BRA 0x1070 ; /* 0x000001d000000947 */ /* 0x000fea0003800000 */ /*0ea0*/ ISETP.GE.AND P0, PT, R13, -0x18, PT ; /* 0xffffffe80d00780c */ /* 0x000fe40003f06270 */ /*0eb0*/ LOP3.LUT R5, R5, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000005057812 */ /* 0x000fd600078ec0ff */ /*0ec0*/ @!P0 BRA 0x1070 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0ed0*/ FFMA.RZ R6, R12.reuse, R10.reuse, R11.reuse ; /* 0x0000000a0c067223 */ /* 0x1c0fe2000000c00b */ /*0ee0*/ IADD3 R9, R13.reuse, 0x20, RZ ; /* 0x000000200d097810 */ /* 0x040fe20007ffe0ff */ /*0ef0*/ FFMA.RM R7, R12, R10.reuse, R11.reuse ; /* 0x0000000a0c077223 */ /* 0x180fe2000000400b */ /*0f00*/ ISETP.NE.AND P2, PT, R13.reuse, RZ, PT ; /* 0x000000ff0d00720c */ /* 0x040fe40003f45270 */ /*0f10*/ LOP3.LUT R8, R6, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff06087812 */ /* 0x000fe200078ec0ff */ /*0f20*/ FFMA.RP R6, R12, R10, R11 ; /* 0x0000000a0c067223 */ /* 0x000fe2000000800b */ /*0f30*/ ISETP.NE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */ /* 0x000fe20003f25270 */ /*0f40*/ IMAD.MOV R10, RZ, RZ, -R13 ; /* 0x000000ffff0a7224 */ /* 0x000fe200078e0a0d */ /*0f50*/ LOP3.LUT R8, R8, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000008087812 */ /* 0x000fe400078efcff */ /*0f60*/ FSETP.NEU.FTZ.AND P0, PT, R6, R7, PT ; /* 0x000000070600720b */ 
/* 0x000fc40003f1d000 */ /*0f70*/ SHF.L.U32 R9, R8, R9, RZ ; /* 0x0000000908097219 */ /* 0x000fe400000006ff */ /*0f80*/ SEL R7, R10, RZ, P2 ; /* 0x000000ff0a077207 */ /* 0x000fe40001000000 */ /*0f90*/ ISETP.NE.AND P1, PT, R9, RZ, P1 ; /* 0x000000ff0900720c */ /* 0x000fe40000f25270 */ /*0fa0*/ SHF.R.U32.HI R7, RZ, R7, R8 ; /* 0x00000007ff077219 */ /* 0x000fe40000011608 */ /*0fb0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40000703570 */ /*0fc0*/ SHF.R.U32.HI R9, RZ, 0x1, R7 ; /* 0x00000001ff097819 */ /* 0x000fc40000011607 */ /*0fd0*/ SEL R6, RZ, 0x1, !P0 ; /* 0x00000001ff067807 */ /* 0x000fc80004000000 */ /*0fe0*/ LOP3.LUT R6, R6, 0x1, R9, 0xf8, !PT ; /* 0x0000000106067812 */ /* 0x000fc800078ef809 */ /*0ff0*/ LOP3.LUT R6, R6, R7, RZ, 0xc0, !PT ; /* 0x0000000706067212 */ /* 0x000fc800078ec0ff */ /*1000*/ IADD3 R6, R9, R6, RZ ; /* 0x0000000609067210 */ /* 0x000fc80007ffe0ff */ /*1010*/ LOP3.LUT R5, R6, R5, RZ, 0xfc, !PT ; /* 0x0000000506057212 */ /* 0x000fe200078efcff */ /*1020*/ BRA 0x1070 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*1030*/ LOP3.LUT R5, R5, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000005057812 */ /* 0x000fc800078ec0ff */ /*1040*/ LOP3.LUT R5, R5, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000005057812 */ /* 0x000fe200078efcff */ /*1050*/ BRA 0x1070 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*1060*/ IMAD R5, R8, 0x800000, R5 ; /* 0x0080000008057824 */ /* 0x000fe400078e0205 */ /*1070*/ BSYNC B2 ; /* 0x0000000000027941 */ /* 0x000fea0003800000 */ /*1080*/ BRA 0x1110 ; /* 0x0000008000007947 */ /* 0x000fea0003800000 */ /*1090*/ LOP3.LUT R5, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006057812 */ /* 0x000fc800078e4805 */ /*10a0*/ LOP3.LUT R5, R5, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000005057812 */ /* 0x000fe200078efcff */ /*10b0*/ BRA 0x1110 ; /* 0x0000005000007947 */ /* 0x000fea0003800000 */ /*10c0*/ LOP3.LUT R5, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006057812 */ /* 0x000fe200078e4805 */ /*10d0*/ BRA 
0x1110 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*10e0*/ MUFU.RSQ R5, -QNAN ; /* 0xffc0000000057908 */ /* 0x000e220000001400 */ /*10f0*/ BRA 0x1110 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*1100*/ FADD.FTZ R5, R8, R9 ; /* 0x0000000908057221 */ /* 0x000fe40000010000 */ /*1110*/ BSYNC B1 ; /* 0x0000000000017941 */ /* 0x000fea0003800000 */ /*1120*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */ /* 0x001fe400078e0005 */ /*1130*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */ /* 0x000fc800078e00ff */ /*1140*/ RET.REL.NODEC R4 0x0 ; /* 0xffffeeb004007950 */ /* 0x000fea0003c3ffff */ /*1150*/ BRA 0x1150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*1160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*11f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h"

// Writes a one-hot threshold encoding: for each input element, zeroes `count`
// output planes and sets output[idx*size + id] = 1.0f, where idx is the bin
// index of (input[id] - min) / delta with delta = (max - min)/count.
//
// Launch layout: 1D blocks on a (gridDim.x x gridDim.y) grid; global id is
// derived from blockIdx.x, blockIdx.y and threadIdx.x.
// mode 0: clamp out-of-range values into [0, count-1].
// mode 1: strict — out-of-range threads write nothing.
//
// Fixes vs. the previous version:
//  * __syncthreads() was inside the divergent `if (id < size)` branch; threads
//    with id >= size skipped the barrier, which is undefined behavior. The
//    delta computation and barrier now run unconditionally for every thread.
//  * `idx` had no initializer, so an unexpected `mode` value read an
//    uninitialized variable; it now defaults to 0.
//  * floorf/float literals avoid a silent double promotion in a float kernel.
__global__ void ThresholdKernel(float min, float max, int mode, float* input, float* output, int size, int count)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    __shared__ float delta;

    // One thread computes the bin width; the barrier must be reached by ALL
    // threads of the block, so it stays outside any divergent guard.
    if (threadIdx.x == 0)
        delta = (max - min) / count;
    __syncthreads();

    if (id < size)
    {
        // Clear all output planes for this element.
        for (int i = 0; i < count; i++)
            output[i * size + id] = 0.0f;

        int idx = 0;  // safe default for unexpected mode values
        float fidx = (input[id] - min) / delta;
        switch (mode)
        {
            case 0:
                // Clamp values outside <min,max> into the valid bin range.
                idx = (int)floorf(fmaxf(0.0f, fminf(fidx, (float)(count - 1))));
                break;
            case 1:
                // Strict threshold: discard out-of-range values entirely.
                if (fidx < 0.0f || fidx >= (float)count)
                {
                    return;
                }
                idx = (int)fidx;
                break;
        }
        output[idx * size + id] = 1.0f;
    }
}
.file "tmpxft_0000b441_00000000-6_ThresholdKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii .type _Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii, @function _Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movss %xmm0, 44(%rsp) movss %xmm1, 40(%rsp) movl %edi, 36(%rsp) movq %rsi, 24(%rsp) movq %rdx, 16(%rsp) movl %ecx, 32(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 32(%rsp), %rax movq %rax, 152(%rsp) leaq 12(%rsp), %rax movq %rax, 160(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z15ThresholdKernelffiPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size 
_Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii, .-_Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii .globl _Z15ThresholdKernelffiPfS_ii .type _Z15ThresholdKernelffiPfS_ii, @function _Z15ThresholdKernelffiPfS_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z15ThresholdKernelffiPfS_iiffiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z15ThresholdKernelffiPfS_ii, .-_Z15ThresholdKernelffiPfS_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z15ThresholdKernelffiPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z15ThresholdKernelffiPfS_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 
2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h"

// One-hot threshold encoding: zeroes `count` output planes for each element,
// then sets output[idx*size + id] = 1.0f where idx is the bin index of
// (input[id] - min) / delta, delta = (max - min)/count.
//
// mode 0: out-of-range values are clamped into [0, count-1].
// mode 1: strict — out-of-range threads return without writing the one-hot bit.
//
// Fixes vs. the previous version:
//  * __syncthreads() was inside the divergent `if (id < size)` branch —
//    threads failing the guard never reached the barrier (undefined
//    behavior). The shared-memory setup and barrier now execute for every
//    thread of the block.
//  * `idx` was uninitialized when `mode` matched no case; it defaults to 0.
//  * floorf and float literals avoid accidental double-precision math.
__global__ void ThresholdKernel(float min, float max, int mode, float* input, float* output, int size, int count)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    __shared__ float delta;

    // Barrier must be reached by every thread in the block, so keep the
    // delta computation outside the id guard.
    if (threadIdx.x == 0)
        delta = (max - min) / count;
    __syncthreads();

    if (id < size)
    {
        // Clear all output planes for this element.
        for (int i = 0; i < count; i++)
            output[i * size + id] = 0.0f;

        int idx = 0;  // safe default for unexpected mode values
        float fidx = (input[id] - min) / delta;
        switch (mode)
        {
            case 0:
                // Values outside <min,max> are clamped to the nearest bin.
                idx = (int)floorf(fmaxf(0.0f, fminf(fidx, (float)(count - 1))));
                break;
            case 1:
                // Strict threshold: skip out-of-range values.
                if (fidx < 0.0f || fidx >= (float)count)
                {
                    return;
                }
                idx = (int)fidx;
                break;
        }
        output[idx * size + id] = 1.0f;
    }
}
#include <hip/hip_runtime.h>
#include "includes.h"

// HIP port of ThresholdKernel: one-hot threshold encoding. Zeroes `count`
// output planes per element, then sets output[idx*size + id] = 1.0f where
// idx is the bin of (input[id] - min) / delta, delta = (max - min)/count.
//
// mode 0: clamp out-of-range values into [0, count-1].
// mode 1: strict — out-of-range threads write nothing.
//
// Fixes vs. the previous version:
//  * __syncthreads() was inside the divergent `if (id < size)` branch;
//    threads with id >= size skipped the barrier, which is undefined
//    behavior on both CUDA and AMD hardware. The barrier now runs
//    unconditionally for every thread of the block.
//  * `idx` was uninitialized for unexpected mode values; it defaults to 0.
//  * floorf/float literals keep the arithmetic in single precision.
__global__ void ThresholdKernel(float min, float max, int mode, float* input, float* output, int size, int count)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    __shared__ float delta;

    // Every thread must reach the barrier, so the shared-memory setup stays
    // outside the id guard.
    if (threadIdx.x == 0)
        delta = (max - min) / count;
    __syncthreads();

    if (id < size)
    {
        // Clear all output planes for this element.
        for (int i = 0; i < count; i++)
            output[i * size + id] = 0.0f;

        int idx = 0;  // safe default for unexpected mode values
        float fidx = (input[id] - min) / delta;
        switch (mode)
        {
            case 0:
                // Clamp values outside <min,max> into the valid bin range.
                idx = (int)floorf(fmaxf(0.0f, fminf(fidx, (float)(count - 1))));
                break;
            case 1:
                // Strict threshold: discard out-of-range values.
                if (fidx < 0.0f || fidx >= (float)count)
                {
                    return;
                }
                idx = (int)fidx;
                break;
        }
        output[idx * size + id] = 1.0f;
    }
}