system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void grayScale(uchar3 *input, uchar3 *output) { int tid = threadIdx.x + blockIdx.x * blockDim.x; output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3; output[tid].z = output[tid].y = output[tid].x; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void grayScale(uchar3 *input, uchar3 *output) { int tid = threadIdx.x + blockIdx.x * blockDim.x; output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3; output[tid].z = output[tid].y = output[tid].x; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void grayScale(uchar3 *input, uchar3 *output) { int tid = threadIdx.x + blockIdx.x * blockDim.x; output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3; output[tid].z = output[tid].y = output[tid].x; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .globl _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 8 .type _Z9grayScaleP15HIP_vector_typeIhLj3EES1_,@function _Z9grayScaleP15HIP_vector_typeIhLj3EES1_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b128 s[0:3], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_mad_i64_i32 v[2:3], null, v1, 3, s[0:1] s_clause 0x2 global_load_u8 v0, v[2:3], off global_load_u8 v4, v[2:3], off offset:1 global_load_u8 v2, v[2:3], off offset:2 s_waitcnt vmcnt(1) v_add_nc_u16 v0, v4, v0 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u16 v0, v0, v2 v_mad_i64_i32 v[2:3], null, v1, 3, s[2:3] v_and_b32_e32 v0, 0xffff, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_u32_u24_e32 v0, 0x5556, v0 v_lshrrev_b32_e32 v0, 16, v0 s_clause 0x2 global_store_b8 v[2:3], v0, off global_store_b8 v[2:3], v0, off offset:1 global_store_b8 v[2:3], v0, off offset:2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9grayScaleP15HIP_vector_typeIhLj3EES1_, .Lfunc_end0-_Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 
.value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .private_segment_fixed_size: 0 .sgpr_count: 16 .sgpr_spill_count: 0 .symbol: _Z9grayScaleP15HIP_vector_typeIhLj3EES1_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void grayScale(uchar3 *input, uchar3 *output) { int tid = threadIdx.x + blockIdx.x * blockDim.x; output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3; output[tid].z = output[tid].y = output[tid].x; }
.text .file "grayScale.hip" .globl _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ # -- Begin function _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 4, 0x90 .type _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_,@function _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_: # @_Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z9grayScaleP15HIP_vector_typeIhLj3EES1_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_, .Lfunc_end0-_Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9grayScaleP15HIP_vector_typeIhLj3EES1_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z9grayScaleP15HIP_vector_typeIhLj3EES1_,@object # @_Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .section .rodata,"a",@progbits .globl _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 3, 0x0 _Z9grayScaleP15HIP_vector_typeIhLj3EES1_: .quad _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .size _Z9grayScaleP15HIP_vector_typeIhLj3EES1_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9grayScaleP15HIP_vector_typeIhLj3EES1_" .size .L__unnamed_1, 41 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper 
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9grayScaleP6uchar3S0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 1.78813934326171875e-07 ; /* 0x00000003ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */ /* 0x001fca00078e0206 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fca00078e0207 */ /*0070*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea8000c1e1100 */ /*0080*/ LDG.E.U8 R5, [R2.64+0x1] ; /* 0x0000010402057981 */ /* 0x000ea8000c1e1100 */ /*0090*/ LDG.E.U8 R4, [R2.64+0x2] ; /* 0x0000020402047981 */ /* 0x000ea2000c1e1100 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fe200078e0207 */ /*00b0*/ IADD3 R4, R4, R5, R0 ; /* 0x0000000504047210 */ /* 0x004fca0007ffe000 */ /*00c0*/ IMAD.WIDE.U32 R4, R4, -0x55555555, RZ ; /* 0xaaaaaaab04047825 */ /* 0x000fca00078e00ff */ /*00d0*/ SHF.R.U32.HI R5, RZ, 0x1, R5 ; /* 0x00000001ff057819 */ /* 0x000fca0000011605 */ /*00e0*/ STG.E.U8 [R6.64], R5 ; /* 0x0000000506007986 */ /* 0x000fe8000c101104 */ /*00f0*/ STG.E.U8 [R6.64+0x1], R5 ; /* 0x0000010506007986 */ /* 0x000fe8000c101104 */ /*0100*/ STG.E.U8 [R6.64+0x2], R5 ; /* 0x0000020506007986 */ /* 0x000fe2000c101104 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .globl _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 8 .type _Z9grayScaleP15HIP_vector_typeIhLj3EES1_,@function _Z9grayScaleP15HIP_vector_typeIhLj3EES1_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b128 s[0:3], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_mad_i64_i32 v[2:3], null, v1, 3, s[0:1] s_clause 0x2 global_load_u8 v0, v[2:3], off global_load_u8 v4, v[2:3], off offset:1 global_load_u8 v2, v[2:3], off offset:2 s_waitcnt vmcnt(1) v_add_nc_u16 v0, v4, v0 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u16 v0, v0, v2 v_mad_i64_i32 v[2:3], null, v1, 3, s[2:3] v_and_b32_e32 v0, 0xffff, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_u32_u24_e32 v0, 0x5556, v0 v_lshrrev_b32_e32 v0, 16, v0 s_clause 0x2 global_store_b8 v[2:3], v0, off global_store_b8 v[2:3], v0, off offset:1 global_store_b8 v[2:3], v0, off offset:2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9grayScaleP15HIP_vector_typeIhLj3EES1_, .Lfunc_end0-_Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 
.value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .private_segment_fixed_size: 0 .sgpr_count: 16 .sgpr_spill_count: 0 .symbol: _Z9grayScaleP15HIP_vector_typeIhLj3EES1_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001afa38_00000000-6_grayScale.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_ .type _Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_, @function _Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z9grayScaleP6uchar3S0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_, .-_Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_ .globl _Z9grayScaleP6uchar3S0_ .type _Z9grayScaleP6uchar3S0_, @function _Z9grayScaleP6uchar3S0_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z9grayScaleP6uchar3S0_P6uchar3S0_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE2052: .size _Z9grayScaleP6uchar3S0_, .-_Z9grayScaleP6uchar3S0_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9grayScaleP6uchar3S0_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9grayScaleP6uchar3S0_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "grayScale.hip" .globl _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ # -- Begin function _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 4, 0x90 .type _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_,@function _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_: # @_Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z9grayScaleP15HIP_vector_typeIhLj3EES1_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_, .Lfunc_end0-_Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9grayScaleP15HIP_vector_typeIhLj3EES1_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z9grayScaleP15HIP_vector_typeIhLj3EES1_,@object # @_Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .section .rodata,"a",@progbits .globl _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .p2align 3, 0x0 _Z9grayScaleP15HIP_vector_typeIhLj3EES1_: .quad _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .size _Z9grayScaleP15HIP_vector_typeIhLj3EES1_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9grayScaleP15HIP_vector_typeIhLj3EES1_" .size .L__unnamed_1, 41 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__grayScaleP15HIP_vector_typeIhLj3EES1_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9grayScaleP15HIP_vector_typeIhLj3EES1_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper 
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* * Tiled Matrix Multiplication * (MP2, Fall 2014, GPU Programming/Auburn University) * * Compile with -DTILE_WIDTH=16 (for example) to change the tile size. * Compile with -DSEED=12 (for example) to seed the random number generator. */ #include <assert.h> #include <cuda.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \ "and multiplies M by N using tiled matrix multiplication.\n" \ "The values of m, k, and n must be >= 1.\n" \ "\n" \ "Usage: mp2 m k n\n" /* Tile size -- define here if not defined using the -D compiler flag */ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* Seed for the random number generator -- define here if not using -D */ #ifndef SEED # define SEED 1 #endif /* Maximum difference allowed between the GPU and CPU result matrices */ #define EPSILON 1e-2 /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ cudaError_t err = (e); \ if (err != cudaSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, cudaGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){ float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(h_M[row*k+i] - h_M[col*k+i], 2); } expected = sqrt(expected); h_P[row*m+col] = expected; } void argMin(float* h_M, int* result, int m, int k, int row){ float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void argMin_gpu(float* h_M, int* result, int m, int k){ assert(blockDim.x == TILE_WIDTH && blockDim.y 
== TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; if(row >= m) return; float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
Invoke with three command line arguments: m k n */ int main() { printf("%d, %d, %d, %d\n", sizeof(long), sizeof(long long), sizeof(bool), sizeof(char)); /* Get command line arguments; save as m, k, and n */ int m = 100; int k = 128; if (m < 1 || k < 1) { fprintf(stderr, USAGE); fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k); return EXIT_FAILURE; } printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH); /********************************************/ /* M is (m x k), P is (m x m) */ /********************************************/ /* Compute number of bytes needed to stores matrices M and P */ size_t bytesForM = m * k * sizeof(float); size_t bytesForP = m * m * sizeof(float); /* Allocate host memory for matrices */ float *h_M, *h_P; float *result = new float[m*m]; int *index = new int[m]; h_M = (float *)malloc(bytesForM); h_P = (float *)malloc(bytesForP); if (h_M == NULL || h_P == NULL) { fprintf(stderr, "Unable to allocate host memory\n"); return EXIT_FAILURE; } /* Allocate device memory for matrices */ float *d_M, *d_P; int *d_index; CUDA_CHECK(cudaMalloc((void **)&d_M, bytesForM)); CUDA_CHECK(cudaMalloc((void **)&d_P, bytesForP)); CUDA_CHECK(cudaMalloc((void **)&d_index, m*sizeof(int))); /* Fill M (on host) */ std::cout << "Loading matrices...\n"; std::ifstream in1, in2; in1.open("descriptor.txt"); for (int i = 0; i < m*k; ++i) in1 >> h_M[i]; in1.close(); printf("M =\n"); printMatrix(h_M, m, k); /* Copy M to device global memory */ CUDA_CHECK(cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice)); /* Launch the CUDA kernel */ dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); printf("matMul called from host\n"); getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k); argMin_gpu<<<dimGrid, dimBlock>>>(d_P, d_index, m, k); CUDA_CHECK(cudaDeviceSynchronize()); /* Copy result matrix from device global memory back to host memory */ CUDA_CHECK(cudaMemcpy(h_P, d_P, bytesForP, 
cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(index, d_index, m*sizeof(int), cudaMemcpyDeviceToHost)); printf(" product received from host\n"); printf("P =\n"); printMatrix(h_P, m, m); std::ofstream out, out2; out2.open("matrix.txt"); for (int i = 0; i < 100; i++){ for (int j = 0; j < m; j++){ if (h_P[i*m+j] < 0.3) out2 << j+1 << " "; } out2 << std::endl; } out2.close(); /* for (int row = 0; row < m; row++) { for (int col = 0; col < m; col++) { getDistance(h_M, result, m, k, col, row); } } printf("\nExpected matrix:\n"); printMatrix(result, m, m); printf("\n"); for (int i = 0; i < m; i++){ printf("%d ", index[i]); } printf("\n"); */ /* Free device global memory */ CUDA_CHECK(cudaFree(d_M)); CUDA_CHECK(cudaFree(d_P)); CUDA_CHECK(cudaFree(d_index)); /* Free host memory */ free(h_M); free(h_P); free(index); free(result); /* Reset the device (unnecessary if not profiling, but good practice) */ CUDA_CHECK(cudaDeviceReset()); printf("Done\n"); system("Pause"); return EXIT_SUCCESS; }
.file "tmpxft_0014e06d_00000000-6_mp3.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL15getDistance_gpuPfS_ii, @function _ZL15getDistance_gpuPfS_ii: .LFB3832: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax movq %rdi, 16(%rsp) movq %rsi, 24(%rsp) movl %edx, 8(%rsp) movl %ecx, 12(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 12(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L5 .L1: movq 136(%rsp), %rax subq %fs:40, %rax jne .L6 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L5: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _ZL15getDistance_gpuPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L1 .L6: call __stack_chk_fail@PLT .cfi_endproc .LFE3832: .size _ZL15getDistance_gpuPfS_ii, .-_ZL15getDistance_gpuPfS_ii .type _ZL10argMin_gpuPfPiii, @function _ZL10argMin_gpuPfPiii: .LFB3830: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax movq %rdi, 16(%rsp) movq %rsi, 24(%rsp) movl %edx, 8(%rsp) movl %ecx, 12(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 12(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT 
testl %eax, %eax je .L11 .L7: movq 136(%rsp), %rax subq %fs:40, %rax jne .L12 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _ZL10argMin_gpuPfPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE3830: .size _ZL10argMin_gpuPfPiii, .-_ZL10argMin_gpuPfPiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "[" .LC1: .string " %3.3f" .LC2: .string " ..." .LC3: .string " ]\n" .text .type _ZL8printRowiPfi, @function _ZL8printRowiPfi: .LFB3802: .cfi_startproc pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movl %edi, %r12d movq %rsi, %rbp movl %edx, %ebx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testl %ebx, %ebx jg .L21 .L19: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state movl %ebx, %r14d imull %r12d, %r14d movslq %r14d, %r13 leaq 0(,%r13,4), %r15 pxor %xmm0, %xmm0 cvtss2sd 0(%rbp,%r13,4), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT cmpl $1, %ebx jle .L19 pxor %xmm0, %xmm0 cvtss2sd 4(%rbp,%r15), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT cmpl $2, %ebx jle .L19 
pxor %xmm0, %xmm0 cvtss2sd 8(%rbp,%r13,4), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT cmpl $5, %ebx jg .L22 jne .L17 .L16: leal -2(%r14,%rbx), %eax cltq pxor %xmm0, %xmm0 cvtss2sd 0(%rbp,%rax,4), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT .L18: movl %r12d, %eax imull %ebx, %eax leal -1(%rbx,%rax), %eax cltq pxor %xmm0, %xmm0 cvtss2sd 0(%rbp,%rax,4), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT jmp .L19 .L22: leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L17: cmpl $3, %ebx jle .L19 jmp .L18 .cfi_endproc .LFE3802: .size _ZL8printRowiPfi, .-_ZL8printRowiPfi .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3807: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3807: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11getDistancePfS_iiii .type _Z11getDistancePfS_iiii, @function _Z11getDistancePfS_iiii: .LFB3800: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $8, %rsp .cfi_def_cfa_offset 48 movq %rsi, %r12 movl %edx, %r13d movl %r8d, %ebp movl %r9d, %ebx testl %ecx, %ecx jle .L31 movl %ecx, %edx imull %r8d, %edx movl %ecx, %esi imull %r9d, %esi movslq %esi, %rsi leaq (%rdi,%rsi,4), %rax movslq %ecx, %rcx addq %rsi, %rcx leaq (%rdi,%rcx,4), %rcx pxor %xmm0, %xmm0 movslq %edx, %rdx subq %rsi, %rdx .L27: movss (%rax), %xmm1 subss (%rax,%rdx,4), %xmm1 cvtss2sd %xmm1, %xmm1 mulsd %xmm1, %xmm1 cvtss2sd %xmm0, %xmm0 addsd %xmm0, %xmm1 pxor %xmm0, %xmm0 cvtsd2ss %xmm1, %xmm0 addq $4, %rax cmpq %rcx, %rax jne .L27 pxor %xmm1, %xmm1 
ucomiss %xmm0, %xmm1 ja .L35 .L26: sqrtss %xmm0, %xmm0 .L30: imull %r13d, %ebx addl %ebp, %ebx movslq %ebx, %rbx movss %xmm0, (%r12,%rbx,4) addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pxor %xmm0, %xmm0 jmp .L26 .L35: call sqrtf@PLT jmp .L30 .cfi_endproc .LFE3800: .size _Z11getDistancePfS_iiii, .-_Z11getDistancePfS_iiii .globl _Z6argMinPfPiiii .type _Z6argMinPfPiiii, @function _Z6argMinPfPiiii: .LFB3801: .cfi_startproc endbr64 testl %edx, %edx jle .L41 movslq %edx, %r9 imull %r8d, %edx movslq %edx, %rdx leaq (%rdi,%rdx,4), %rcx movl $0, %eax movl $-1, %edx movss .LC5(%rip), %xmm0 .L40: movss (%rcx,%rax,4), %xmm1 comiss %xmm1, %xmm0 cmova %eax, %edx minss %xmm0, %xmm1 movaps %xmm1, %xmm0 addq $1, %rax cmpq %r9, %rax jne .L40 .L37: movslq %r8d, %r8 movl %edx, (%rsi,%r8,4) ret .L41: movl $-1, %edx jmp .L37 .cfi_endproc .LFE3801: .size _Z6argMinPfPiiii, .-_Z6argMinPfPiiii .section .rodata.str1.1 .LC6: .string "_Z15getDistance_gpuPfS_ii" .LC7: .string "_Z10argMin_gpuPfPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3834: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _ZL15getDistance_gpuPfS_ii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq 
_ZL10argMin_gpuPfPiii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3834: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .rodata.str1.1 .LC8: .string "%d, %d, %d, %d\n" .LC9: .string "using (%d x %d) tiles.\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC10: .string "Unable to allocate host memory\n" .align 8 .LC11: .string "cudaMalloc((void **)&d_M, bytesForM)" .align 8 .LC12: .string "/home/ubuntu/Datasets/stackv2/train-structured/goldenpartner/Face-recognition/master/mp3.cu" .align 8 .LC13: .string "CUDA error: %s, line %d, %s: %s\n" .align 8 .LC14: .string "cudaMalloc((void **)&d_P, bytesForP)" .align 8 .LC15: .string "cudaMalloc((void **)&d_index, m*sizeof(int))" .section .rodata.str1.1 .LC16: .string "Loading matrices...\n" .LC17: .string "descriptor.txt" .LC18: .string "M =\n" .LC19: .string " ...\n" .section .rodata.str1.8 .align 8 .LC20: .string "cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice)" .section .rodata.str1.1 .LC21: .string "matMul called from host\n" .LC22: .string "cudaDeviceSynchronize()" .section .rodata.str1.8 .align 8 .LC23: .string "cudaMemcpy(h_P, d_P, bytesForP, cudaMemcpyDeviceToHost)" .align 8 .LC24: .string "cudaMemcpy(index, d_index, m*sizeof(int), cudaMemcpyDeviceToHost)" .section .rodata.str1.1 .LC25: .string " product received from host\n" .LC26: .string "P =\n" .LC27: .string "matrix.txt" .LC29: .string " " .LC30: .string "cudaFree(d_M)" .LC31: .string "cudaFree(d_P)" .LC32: .string "cudaFree(d_index)" .LC33: .string "cudaDeviceReset()" .LC34: .string "Done\n" .LC35: .string "Pause" .text .globl main .type main, @function main: .LFB3804: .cfi_startproc 
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA3804 endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $2168, %rsp .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %fs:40, %rax movq %rax, -56(%rbp) xorl %eax, %eax movl $1, %r9d movl $1, %r8d movl $8, %ecx movl $8, %edx leaq .LC8(%rip), %rsi movl $2, %edi .LEHB0: call __printf_chk@PLT movl $16, %ecx movl $16, %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $400, %edi call _Znam@PLT movq %rax, -2200(%rbp) movl $51200, %edi call malloc@PLT movq %rax, %r14 movl $40000, %edi call malloc@PLT movq %rax, -2192(%rbp) testq %r14, %r14 je .L83 testq %rax, %rax je .L83 leaq -2176(%rbp), %rdi movl $51200, %esi call cudaMalloc@PLT movl %eax, %edi testl %eax, %eax jne .L88 leaq -2168(%rbp), %rdi movl $40000, %esi call cudaMalloc@PLT movl %eax, %edi testl %eax, %eax jne .L89 leaq -2160(%rbp), %rdi movl $400, %esi call cudaMalloc@PLT movl %eax, %edi testl %eax, %eax jne .L90 leaq .LC16(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT leaq -1104(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev@PLT .LEHE0: leaq -576(%rbp), %rdi .LEHB1: call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev@PLT .LEHE1: jmp .L91 .L83: leaq .LC10(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .LEHB2: call __fprintf_chk@PLT movl $1, %eax jmp .L46 .L88: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC11(%rip), %r9 movl $178, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L89: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC14(%rip), %r9 movl $179, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq 
stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L90: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC15(%rip), %r9 movl $180, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT .LEHE2: movl $1, %edi call exit@PLT .L91: leaq -1104(%rbp), %rdi movl $8, %edx leaq .LC17(%rip), %rsi .LEHB3: call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT movq %r14, %rbx leaq 51200(%r14), %rax movq %rax, %r15 leaq -1104(%rbp), %r12 jmp .L53 .L93: addq $4, %rbx cmpq %r15, %rbx je .L92 .L53: movq %rbx, %rsi movq %r12, %rdi call _ZNSi10_M_extractIfEERSiRT_@PLT jmp .L93 .L92: leaq -1104(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@PLT leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $128, %edx movq %r14, %rsi movl $0, %edi call _ZL8printRowiPfi movl $128, %edx movq %r14, %rsi movl $1, %edi call _ZL8printRowiPfi movl $128, %edx movq %r14, %rsi movl $2, %edi call _ZL8printRowiPfi leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $128, %edx movq %r14, %rsi movl $98, %edi call _ZL8printRowiPfi movl $128, %edx movq %r14, %rsi movl $99, %edi call _ZL8printRowiPfi movl $1, %ecx movl $51200, %edx movq %r14, %rsi movq -2176(%rbp), %rdi call cudaMemcpy@PLT movl %eax, %edi testl %eax, %eax jne .L94 movl $7, -2152(%rbp) movl $7, -2148(%rbp) movl $1, -2144(%rbp) movl $16, -2140(%rbp) movl $16, -2136(%rbp) movl $1, -2132(%rbp) leaq .LC21(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L95 .L94: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC20(%rip), %r9 movl $197, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L95: movl -2132(%rbp), %ecx movl $0, %r9d movl $0, %r8d movq -2140(%rbp), %rdx movq 
-2152(%rbp), %rdi movl -2144(%rbp), %esi .cfi_escape 0x2e,0 call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L55 movl $128, %ecx movl $100, %edx movq -2168(%rbp), %rsi movq -2176(%rbp), %rdi call _ZL15getDistance_gpuPfS_ii .L55: movl -2132(%rbp), %ecx movl $0, %r9d movl $0, %r8d movq -2140(%rbp), %rdx movq -2152(%rbp), %rdi movl -2144(%rbp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L56 movl $128, %ecx movl $100, %edx movq -2160(%rbp), %rsi movq -2168(%rbp), %rdi call _ZL10argMin_gpuPfPiii .L56: call cudaDeviceSynchronize@PLT movl %eax, %edi testl %eax, %eax jne .L96 movl $2, %ecx movl $40000, %edx movq -2168(%rbp), %rsi movq -2192(%rbp), %rdi call cudaMemcpy@PLT jmp .L97 .L96: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC22(%rip), %r9 movl $207, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L97: movl %eax, %edi testl %eax, %eax jne .L98 movl $2, %ecx movl $400, %edx movq -2160(%rbp), %rsi movq -2200(%rbp), %rdi .cfi_escape 0x2e,0 call cudaMemcpy@PLT jmp .L99 .L98: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC23(%rip), %r9 movl $210, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L99: movl %eax, %edi testl %eax, %eax jne .L100 leaq .LC25(%rip), %rsi movl $2, %edi movl $0, %eax .cfi_escape 0x2e,0 call __printf_chk@PLT jmp .L101 .L100: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC24(%rip), %r9 movl $211, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L101: leaq .LC26(%rip), %rsi movl $2, %edi movl $0, %eax .cfi_escape 0x2e,0 call __printf_chk@PLT movl $100, 
%edx movq -2192(%rbp), %rbx movq %rbx, %rsi movl $0, %edi call _ZL8printRowiPfi movl $100, %edx movq %rbx, %rsi movl $1, %edi call _ZL8printRowiPfi movl $100, %edx movq %rbx, %rsi movl $2, %edi call _ZL8printRowiPfi leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $100, %edx movq %rbx, %rsi movl $98, %edi call _ZL8printRowiPfi movl $100, %edx movq %rbx, %rsi movl $99, %edi call _ZL8printRowiPfi leaq -2128(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev@PLT .LEHE3: leaq -1616(%rbp), %rdi .LEHB4: call _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev@PLT .LEHE4: leaq -1616(%rbp), %rdi movl $16, %edx leaq .LC27(%rip), %rsi .LEHB5: call _ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT movq %rbx, -2184(%rbp) leaq 40000(%rbx), %r15 leaq -1616(%rbp), %r12 leaq .LC29(%rip), %r13 jmp .L60 .L103: movq %rax, %rdi movl $1, %edx movq %r13, %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT .L61: addq $1, %rbx cmpq $101, %rbx je .L102 .L63: movq -2184(%rbp), %rax pxor %xmm0, %xmm0 cvtss2sd -4(%rax,%rbx,4), %xmm0 movsd .LC28(%rip), %xmm1 comisd %xmm0, %xmm1 jbe .L61 movl %ebx, %esi movq %r12, %rdi call _ZNSolsEi@PLT jmp .L103 .L102: movq -1616(%rbp), %rax movq -24(%rax), %rax movq -1376(%rbp,%rax), %rbx testq %rbx, %rbx je .L104 cmpb $0, 56(%rbx) je .L66 movzbl 67(%rbx), %eax .L67: movsbl %al, %esi movq %r12, %rdi call _ZNSo3putEc@PLT jmp .L105 .L104: movq -56(%rbp), %rax subq %fs:40, %rax jne .L106 call _ZSt16__throw_bad_castv@PLT .L82: endbr64 movq %rax, %rbx leaq -1616(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT .L74: leaq -2128(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT .L75: leaq -576(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT .L76: leaq -1104(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT movq -56(%rbp), %rax subq %fs:40, %rax je .L77 call __stack_chk_fail@PLT .L106: call 
__stack_chk_fail@PLT .L66: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L67 .L105: movq %rax, %rdi call _ZNSo5flushEv@PLT addq $400, -2184(%rbp) movq -2184(%rbp), %rax cmpq %r15, %rax je .L68 .L60: movl $1, %ebx jmp .L63 .L68: leaq -1616(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEE5closeEv@PLT movq -2176(%rbp), %rdi call cudaFree@PLT movl %eax, %edi testl %eax, %eax jne .L107 movq -2168(%rbp), %rdi call cudaFree@PLT jmp .L108 .L107: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC30(%rip), %r9 movl $249, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L108: movl %eax, %edi testl %eax, %eax jne .L109 movq -2160(%rbp), %rdi .cfi_escape 0x2e,0 call cudaFree@PLT jmp .L110 .L109: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC31(%rip), %r9 movl $250, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L110: movl %eax, %edi testl %eax, %eax jne .L111 movq %r14, %rdi call free@PLT movq -2192(%rbp), %rdi call free@PLT movq -2200(%rbp), %rdi call free@PLT .cfi_escape 0x2e,0 call cudaDeviceReset@PLT jmp .L112 .L111: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC32(%rip), %r9 movl $251, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L112: movl %eax, %edi testl %eax, %eax jne .L113 leaq .LC34(%rip), %rsi movl $2, %edi movl $0, %eax .cfi_escape 0x2e,0 call __printf_chk@PLT jmp .L114 .L113: call cudaGetErrorString@PLT subq $8, %rsp pushq %rax leaq .LC33(%rip), %r9 movl $261, %r8d leaq .LC12(%rip), %rcx leaq .LC13(%rip), 
%rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax .cfi_escape 0x2e,0x10 call __fprintf_chk@PLT addq $16, %rsp movl $1, %edi call exit@PLT .L114: leaq .LC35(%rip), %rdi .cfi_escape 0x2e,0 call system@PLT .LEHE5: leaq -1616(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT leaq -2128(%rbp), %rdi call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT leaq -576(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT leaq -1104(%rbp), %rdi call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT movl $0, %eax .L46: movq -56(%rbp), %rdx subq %fs:40, %rdx jne .L115 leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L81: .cfi_restore_state endbr64 movq %rax, %rbx jmp .L74 .L80: endbr64 movq %rax, %rbx jmp .L75 .L79: endbr64 movq %rax, %rbx jmp .L76 .L77: movq %rbx, %rdi .LEHB6: call _Unwind_Resume@PLT .LEHE6: .L115: call __stack_chk_fail@PLT .cfi_endproc .LFE3804: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA3804: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE3804-.LLSDACSB3804 .LLSDACSB3804: .uleb128 .LEHB0-.LFB3804 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB3804 .uleb128 .LEHE1-.LEHB1 .uleb128 .L79-.LFB3804 .uleb128 0 .uleb128 .LEHB2-.LFB3804 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .uleb128 .LEHB3-.LFB3804 .uleb128 .LEHE3-.LEHB3 .uleb128 .L80-.LFB3804 .uleb128 0 .uleb128 .LEHB4-.LFB3804 .uleb128 .LEHE4-.LEHB4 .uleb128 .L81-.LFB3804 .uleb128 0 .uleb128 .LEHB5-.LFB3804 .uleb128 .LEHE5-.LEHB5 .uleb128 .L82-.LFB3804 .uleb128 0 .uleb128 .LEHB6-.LFB3804 .uleb128 .LEHE6-.LEHB6 .uleb128 0 .uleb128 0 .LLSDACSE3804: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section 
.rodata.cst4,"aM",@progbits,4 .align 4 .LC5: .long 1203982336 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC28: .long 858993459 .long 1070805811 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/*
 * Tiled Matrix Multiplication
 * (MP2, Fall 2014, GPU Programming/Auburn University)
 *
 * Compile with -DTILE_WIDTH=16 (for example) to change the tile size.
 * Compile with -DSEED=12 (for example) to seed the random number generator.
 */

#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>

/* Usage message displayed when invalid command line arguments are supplied */
#define USAGE \
"MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \
"and multiplies M by N using tiled matrix multiplication.\n" \
"The values of m, k, and n must be >= 1.\n" \
"\n" \
"Usage: mp2 m k n\n"

/* Tile size -- define here if not defined using the -D compiler flag */
#ifndef TILE_WIDTH
# define TILE_WIDTH 16
#endif

/* Seed for the random number generator -- define here if not using -D */
#ifndef SEED
# define SEED 1
#endif

/* Maximum difference allowed between the GPU and CPU result matrices */
#define EPSILON 1e-2

/* If a CUDA call fails, display an error message and exit */
#define CUDA_CHECK(e) { \
        cudaError_t err = (e); \
        if (err != cudaSuccess) \
        { \
            fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \
                __FILE__, __LINE__, #e, cudaGetErrorString(err)); \
            exit(EXIT_FAILURE); \
        } \
    }

/* assert() is only supported on devices of compute capability >= 2.0 */
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
# undef assert
# define assert(arg)
#endif

/*
 * Host reference: distance between rows `row` and `col` of the (m x k)
 * matrix h_M (square root of the sum of squared per-feature differences),
 * stored at h_P[row*m+col].
 *
 * Uses single-precision throughout (d*d instead of pow(..., 2), sqrtf
 * instead of sqrt) so the float CPU reference does not silently promote
 * every iteration to double.
 */
void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){
    float expected = 0.0f;
    for (int i = 0; i < k; i++) {
        float d = h_M[row*k+i] - h_M[col*k+i];
        expected += d * d;
    }
    h_P[row*m+col] = sqrtf(expected);
}

/*
 * Host reference: index of the smallest entry in row `row` of the (m x m)
 * matrix h_M, written to result[row]. Entries >= 1e5 are never selected;
 * -1 is stored if no entry qualifies. (k is unused; kept so the signature
 * parallels getDistance.)
 */
void argMin(float* h_M, int* result, int m, int k, int row){
    float minimum = 1e5f;
    int pos = -1;
    for(int col = 0; col < m; col++){
        if (h_M[row*m+col] < minimum){
            pos = col;
            minimum = h_M[row*m+col];
        }
    }
    result[row] = pos;
}

/*
 * Kernel: per-row argmin of the (m x m) matrix h_M into result[row].
 * Expects TILE_WIDTH x TILE_WIDTH blocks. Only the y index selects the
 * row, so all threads sharing a y within a block compute the same row
 * (kept as in the original design; the duplicate writes store the same
 * value). Entries >= 1e5 are never selected; -1 if none qualifies.
 */
__global__ static void argMin_gpu(float* h_M, int* result, int m, int k){
    assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH);
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    if(row >= m) return;               /* grid tail guard */
    float minimum = 1e5f;
    int pos = -1;
    for(int col = 0; col < m; col++){
        if (h_M[row*m+col] < minimum){
            pos = col;
            minimum = h_M[row*m+col];
        }
    }
    result[row] = pos;
}

/*
 * Kernel: pairwise distance matrix. One thread per (row, col) element of
 * the (m x m) output d_P; d_M is the (m x k) input matrix. The diagonal
 * is set to the sentinel value 100 (presumably so argMin_gpu never picks
 * a row's own entry -- valid as long as real distances stay below 100;
 * TODO confirm against the data).
 *
 * Fixed: uses single-precision d*d / sqrtf instead of the original
 * double-precision pow(..., 2) / sqrt, which promoted every loop
 * iteration to double inside a float kernel.
 */
__global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k)
{
    assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH);
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if(row >= m || col >= m) return;   /* grid tail guard */
    if(row == col){
        d_P[row*m+col] = 100.0f;       /* diagonal sentinel */
        return;
    }
    float expected = 0.0f;
    for (int i = 0; i < k; i++) {
        float d = d_M[row*k+i] - d_M[col*k+i];
        expected += d * d;
    }
    d_P[row*m+col] = sqrtf(expected);
}

/* Displays one row of the given matrix: the first three and the last two
   entries, with "..." marking anything elided */
static void printRow(int row, float *matrix, int cols)
{
    printf("[");
    if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]);
    if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]);
    if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]);
    if (cols >= 6) printf(" ...");
    if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]);
    if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]);
    printf(" ]\n");
}

/* Displays the given matrix: the first three and the last two rows, with
   "..." marking anything elided */
static void printMatrix(float *matrix, int rows, int cols)
{
    if (rows >= 1) printRow(0, matrix, cols);
    if (rows >= 2) printRow(1, matrix, cols);
    if (rows >= 3) printRow(2, matrix, cols);
    if (rows >= 6) printf(" ...\n");
    if (rows >= 5) printRow(rows-2, matrix, cols);
    if (rows >= 4) printRow(rows-1, matrix, cols);
}

/* Program entrypoint.
Invoke with three command line arguments: m k n */
int main()
{
    /* sizeof yields size_t; print with %zu -- the original passed size_t
       through %d, which is undefined behavior on LP64 platforms. */
    printf("%zu, %zu, %zu, %zu\n", sizeof(long), sizeof(long long),
           sizeof(bool), sizeof(char));

    /* Problem dimensions: m rows of k features each. */
    int m = 100;
    int k = 128;
    if (m < 1 || k < 1)
    {
        fprintf(stderr, USAGE);
        fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k);
        return EXIT_FAILURE;
    }

    printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH);

    /********************************************/
    /* M is (m x k), P is (m x m)               */
    /********************************************/

    /* Compute number of bytes needed to store matrices M and P */
    size_t bytesForM = m * k * sizeof(float);
    size_t bytesForP = m * m * sizeof(float);

    /* Allocate host memory for matrices */
    float *h_M, *h_P;
    float *result = new float[m*m];   /* CPU reference buffer (see commented-out check below) */
    int *index = new int[m];
    h_M = (float *)malloc(bytesForM);
    h_P = (float *)malloc(bytesForP);
    if (h_M == NULL || h_P == NULL)
    {
        fprintf(stderr, "Unable to allocate host memory\n");
        return EXIT_FAILURE;
    }

    /* Allocate device memory for matrices */
    float *d_M, *d_P;
    int *d_index;
    CUDA_CHECK(cudaMalloc((void **)&d_M, bytesForM));
    CUDA_CHECK(cudaMalloc((void **)&d_P, bytesForP));
    CUDA_CHECK(cudaMalloc((void **)&d_index, m*sizeof(int)));

    /* Fill M (on host) from the descriptor file */
    std::cout << "Loading matrices...\n";
    std::ifstream in1, in2;
    in1.open("descriptor.txt");
    for (int i = 0; i < m*k; ++i)
        in1 >> h_M[i];
    in1.close();
    printf("M =\n");
    printMatrix(h_M, m, k);

    /* Copy M to device global memory */
    CUDA_CHECK(cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice));

    /* Launch the CUDA kernels. A launch returns no status itself, so check
       cudaGetLastError() after each one to catch bad configurations. */
    dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    printf("matMul called from host\n");
    getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k);
    CUDA_CHECK(cudaGetLastError());
    argMin_gpu<<<dimGrid, dimBlock>>>(d_P, d_index, m, k);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    /* Copy result matrix from device global memory back to host memory */
    CUDA_CHECK(cudaMemcpy(h_P, d_P, bytesForP, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(index, d_index, m*sizeof(int), cudaMemcpyDeviceToHost));
    printf(" product received from host\n");
    printf("P =\n");
    printMatrix(h_P, m, m);

    /* Write close matches (distance < 0.3) to matrix.txt, one row per line */
    std::ofstream out, out2;
    out2.open("matrix.txt");
    for (int i = 0; i < 100; i++){
        for (int j = 0; j < m; j++){
            if (h_P[i*m+j] < 0.3)
                out2 << j+1 << " ";
        }
        out2 << std::endl;
    }
    out2.close();

    /*
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < m; col++) {
            getDistance(h_M, result, m, k, col, row);
        }
    }
    printf("\nExpected matrix:\n");
    printMatrix(result, m, m);
    printf("\n");
    for (int i = 0; i < m; i++){
        printf("%d ", index[i]);
    }
    printf("\n");
    */

    /* Free device global memory */
    CUDA_CHECK(cudaFree(d_M));
    CUDA_CHECK(cudaFree(d_P));
    CUDA_CHECK(cudaFree(d_index));

    /* Free host memory. index and result were allocated with new[], so
       they must be released with delete[] -- the original called free()
       on them, which is undefined behavior. */
    free(h_M);
    free(h_P);
    delete[] index;
    delete[] result;

    /* Reset the device (unnecessary if not profiling, but good practice) */
    CUDA_CHECK(cudaDeviceReset());

    printf("Done\n");
    system("Pause");
    return EXIT_SUCCESS;
}
/* * Tiled Matrix Multiplication * (MP2, Fall 2014, GPU Programming/Auburn University) * * Compile with -DTILE_WIDTH=16 (for example) to change the tile size. * Compile with -DSEED=12 (for example) to seed the random number generator. */ #include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \ "and multiplies M by N using tiled matrix multiplication.\n" \ "The values of m, k, and n must be >= 1.\n" \ "\n" \ "Usage: mp2 m k n\n" /* Tile size -- define here if not defined using the -D compiler flag */ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* Seed for the random number generator -- define here if not using -D */ #ifndef SEED # define SEED 1 #endif /* Maximum difference allowed between the GPU and CPU result matrices */ #define EPSILON 1e-2 /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ hipError_t err = (e); \ if (err != hipSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){ float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(h_M[row*k+i] - h_M[col*k+i], 2); } expected = sqrt(expected); h_P[row*m+col] = expected; } void argMin(float* h_M, int* result, int m, int k, int row){ float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void argMin_gpu(float* h_M, int* result, int m, int k){ assert(blockDim.x == TILE_WIDTH && 
blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; if(row >= m) return; float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
Invoke with three command line arguments: m k n */
int main()
{
    /* sizeof yields size_t; print with %zu -- the original passed size_t
       through %d, which is undefined behavior on LP64 platforms. */
    printf("%zu, %zu, %zu, %zu\n", sizeof(long), sizeof(long long),
           sizeof(bool), sizeof(char));

    /* Problem dimensions: m rows of k features each. */
    int m = 100;
    int k = 128;
    if (m < 1 || k < 1)
    {
        fprintf(stderr, USAGE);
        fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k);
        return EXIT_FAILURE;
    }

    printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH);

    /********************************************/
    /* M is (m x k), P is (m x m)               */
    /********************************************/

    /* Compute number of bytes needed to store matrices M and P */
    size_t bytesForM = m * k * sizeof(float);
    size_t bytesForP = m * m * sizeof(float);

    /* Allocate host memory for matrices */
    float *h_M, *h_P;
    float *result = new float[m*m];   /* CPU reference buffer (see commented-out check below) */
    int *index = new int[m];
    h_M = (float *)malloc(bytesForM);
    h_P = (float *)malloc(bytesForP);
    if (h_M == NULL || h_P == NULL)
    {
        fprintf(stderr, "Unable to allocate host memory\n");
        return EXIT_FAILURE;
    }

    /* Allocate device memory for matrices */
    float *d_M, *d_P;
    int *d_index;
    CUDA_CHECK(hipMalloc((void **)&d_M, bytesForM));
    CUDA_CHECK(hipMalloc((void **)&d_P, bytesForP));
    CUDA_CHECK(hipMalloc((void **)&d_index, m*sizeof(int)));

    /* Fill M (on host) from the descriptor file */
    std::cout << "Loading matrices...\n";
    std::ifstream in1, in2;
    in1.open("descriptor.txt");
    for (int i = 0; i < m*k; ++i)
        in1 >> h_M[i];
    in1.close();
    printf("M =\n");
    printMatrix(h_M, m, k);

    /* Copy M to device global memory */
    CUDA_CHECK(hipMemcpy(d_M, h_M, bytesForM, hipMemcpyHostToDevice));

    /* Launch the kernels. A launch returns no status itself, so check
       hipGetLastError() after each one to catch bad configurations. */
    dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    printf("matMul called from host\n");
    getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k);
    CUDA_CHECK(hipGetLastError());
    argMin_gpu<<<dimGrid, dimBlock>>>(d_P, d_index, m, k);
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());

    /* Copy result matrix from device global memory back to host memory */
    CUDA_CHECK(hipMemcpy(h_P, d_P, bytesForP, hipMemcpyDeviceToHost));
    CUDA_CHECK(hipMemcpy(index, d_index, m*sizeof(int), hipMemcpyDeviceToHost));
    printf(" product received from host\n");
    printf("P =\n");
    printMatrix(h_P, m, m);

    /* Write close matches (distance < 0.3) to matrix.txt, one row per line */
    std::ofstream out, out2;
    out2.open("matrix.txt");
    for (int i = 0; i < 100; i++){
        for (int j = 0; j < m; j++){
            if (h_P[i*m+j] < 0.3)
                out2 << j+1 << " ";
        }
        out2 << std::endl;
    }
    out2.close();

    /*
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < m; col++) {
            getDistance(h_M, result, m, k, col, row);
        }
    }
    printf("\nExpected matrix:\n");
    printMatrix(result, m, m);
    printf("\n");
    for (int i = 0; i < m; i++){
        printf("%d ", index[i]);
    }
    printf("\n");
    */

    /* Free device global memory */
    CUDA_CHECK(hipFree(d_M));
    CUDA_CHECK(hipFree(d_P));
    CUDA_CHECK(hipFree(d_index));

    /* Free host memory. index and result were allocated with new[], so
       they must be released with delete[] -- the original called free()
       on them, which is undefined behavior. */
    free(h_M);
    free(h_P);
    delete[] index;
    delete[] result;

    /* Reset the device (unnecessary if not profiling, but good practice) */
    CUDA_CHECK(hipDeviceReset());

    printf("Done\n");
    system("Pause");
    return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* * Tiled Matrix Multiplication * (MP2, Fall 2014, GPU Programming/Auburn University) * * Compile with -DTILE_WIDTH=16 (for example) to change the tile size. * Compile with -DSEED=12 (for example) to seed the random number generator. */ #include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \ "and multiplies M by N using tiled matrix multiplication.\n" \ "The values of m, k, and n must be >= 1.\n" \ "\n" \ "Usage: mp2 m k n\n" /* Tile size -- define here if not defined using the -D compiler flag */ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* Seed for the random number generator -- define here if not using -D */ #ifndef SEED # define SEED 1 #endif /* Maximum difference allowed between the GPU and CPU result matrices */ #define EPSILON 1e-2 /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ hipError_t err = (e); \ if (err != hipSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){ float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(h_M[row*k+i] - h_M[col*k+i], 2); } expected = sqrt(expected); h_P[row*m+col] = expected; } void argMin(float* h_M, int* result, int m, int k, int row){ float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void argMin_gpu(float* h_M, int* result, int m, int k){ assert(blockDim.x == TILE_WIDTH && 
blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; if(row >= m) return; float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
Invoke with three command line arguments: m k n */ int main() { printf("%d, %d, %d, %d\n", sizeof(long), sizeof(long long), sizeof(bool), sizeof(char)); /* Get command line arguments; save as m, k, and n */ int m = 100; int k = 128; if (m < 1 || k < 1) { fprintf(stderr, USAGE); fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k); return EXIT_FAILURE; } printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH); /********************************************/ /* M is (m x k), P is (m x m) */ /********************************************/ /* Compute number of bytes needed to stores matrices M and P */ size_t bytesForM = m * k * sizeof(float); size_t bytesForP = m * m * sizeof(float); /* Allocate host memory for matrices */ float *h_M, *h_P; float *result = new float[m*m]; int *index = new int[m]; h_M = (float *)malloc(bytesForM); h_P = (float *)malloc(bytesForP); if (h_M == NULL || h_P == NULL) { fprintf(stderr, "Unable to allocate host memory\n"); return EXIT_FAILURE; } /* Allocate device memory for matrices */ float *d_M, *d_P; int *d_index; CUDA_CHECK(hipMalloc((void **)&d_M, bytesForM)); CUDA_CHECK(hipMalloc((void **)&d_P, bytesForP)); CUDA_CHECK(hipMalloc((void **)&d_index, m*sizeof(int))); /* Fill M (on host) */ std::cout << "Loading matrices...\n"; std::ifstream in1, in2; in1.open("descriptor.txt"); for (int i = 0; i < m*k; ++i) in1 >> h_M[i]; in1.close(); printf("M =\n"); printMatrix(h_M, m, k); /* Copy M to device global memory */ CUDA_CHECK(hipMemcpy(d_M, h_M, bytesForM, hipMemcpyHostToDevice)); /* Launch the CUDA kernel */ dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); printf("matMul called from host\n"); getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k); argMin_gpu<<<dimGrid, dimBlock>>>(d_P, d_index, m, k); CUDA_CHECK(hipDeviceSynchronize()); /* Copy result matrix from device global memory back to host memory */ CUDA_CHECK(hipMemcpy(h_P, d_P, bytesForP, hipMemcpyDeviceToHost)); 
CUDA_CHECK(hipMemcpy(index, d_index, m*sizeof(int), hipMemcpyDeviceToHost)); printf(" product received from host\n"); printf("P =\n"); printMatrix(h_P, m, m); std::ofstream out, out2; out2.open("matrix.txt"); for (int i = 0; i < 100; i++){ for (int j = 0; j < m; j++){ if (h_P[i*m+j] < 0.3) out2 << j+1 << " "; } out2 << std::endl; } out2.close(); /* for (int row = 0; row < m; row++) { for (int col = 0; col < m; col++) { getDistance(h_M, result, m, k, col, row); } } printf("\nExpected matrix:\n"); printMatrix(result, m, m); printf("\n"); for (int i = 0; i < m; i++){ printf("%d ", index[i]); } printf("\n"); */ /* Free device global memory */ CUDA_CHECK(hipFree(d_M)); CUDA_CHECK(hipFree(d_P)); CUDA_CHECK(hipFree(d_index)); /* Free host memory */ free(h_M); free(h_P); free(index); free(result); /* Reset the device (unnecessary if not profiling, but good practice) */ CUDA_CHECK(hipDeviceReset()); printf("Done\n"); system("Pause"); return EXIT_SUCCESS; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .section .text._ZL10argMin_gpuPfPiii,"axG",@progbits,_ZL10argMin_gpuPfPiii,comdat .globl _ZL10argMin_gpuPfPiii .p2align 8 .type _ZL10argMin_gpuPfPiii,@function _ZL10argMin_gpuPfPiii: s_load_b32 s3, s[0:1], 0x10 v_bfe_u32 v0, v0, 10, 10 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshl_add_u32 v0, s15, 4, v0 s_waitcnt lgkmcnt(0) v_cmpx_gt_i32_e64 s3, v0 s_cbranch_execz .LBB0_6 s_cmp_lt_i32 s3, 1 s_cbranch_scc1 .LBB0_4 s_load_b64 s[4:5], s[0:1], 0x0 v_mul_lo_u32 v1, v0, s3 v_dual_mov_b32 v3, -1 :: v_dual_mov_b32 v4, 0x47c35000 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo s_mov_b32 s4, 0 .LBB0_3: global_load_b32 v5, v[1:2], off v_add_co_u32 v1, vcc_lo, v1, 4 v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo s_waitcnt vmcnt(0) v_cmp_lt_f32_e64 s2, v5, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cndmask_b32_e64 v4, v4, v5, s2 v_cndmask_b32_e64 v3, v3, s4, s2 s_add_i32 s4, s4, 1 s_cmp_eq_u32 s3, s4 s_cbranch_scc0 .LBB0_3 s_branch .LBB0_5 .LBB0_4: v_mov_b32_e32 v3, -1 .LBB0_5: s_load_b64 s[0:1], s[0:1], 0x8 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v3, off .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZL10argMin_gpuPfPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 
.amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZL10argMin_gpuPfPiii,"axG",@progbits,_ZL10argMin_gpuPfPiii,comdat .Lfunc_end0: .size _ZL10argMin_gpuPfPiii, .Lfunc_end0-_ZL10argMin_gpuPfPiii .section .AMDGPU.csdata,"",@progbits .section .text._ZL15getDistance_gpuPfS_ii,"axG",@progbits,_ZL15getDistance_gpuPfS_ii,comdat .globl _ZL15getDistance_gpuPfS_ii .p2align 8 .type _ZL15getDistance_gpuPfS_ii,@function _ZL15getDistance_gpuPfS_ii: s_load_b32 s3, s[0:1], 0x10 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshl_add_u32 v5, s15, 4, v1 v_lshl_add_u32 v0, s14, 4, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_max_i32_e32 v1, v5, v0 s_waitcnt lgkmcnt(0) v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB1_8 v_mov_b32_e32 v1, 0x42c80000 s_mov_b32 s4, exec_lo v_cmpx_ne_u32_e64 v5, v0 s_cbranch_execz .LBB1_7 s_load_b32 s2, 
s[0:1], 0x14 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB1_5 s_load_b64 s[6:7], s[0:1], 0x0 v_mul_lo_u32 v1, v0, s2 v_mul_lo_u32 v3, v5, s2 v_mov_b32_e32 v6, 0 s_mov_b32 s5, 0x3e76c4e1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v2, 31, v1 v_ashrrev_i32_e32 v4, 31, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[1:2], 2, v[1:2] v_lshlrev_b64 v[3:4], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v1, vcc_lo, s6, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v3, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo .LBB1_4: global_load_b32 v7, v[3:4], off global_load_b32 v8, v[1:2], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_sub_f32_e32 v7, v7, v8 v_frexp_mant_f32_e64 v8, |v7| v_frexp_exp_i32_f32_e32 v9, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_cmp_gt_f32_e32 vcc_lo, 0x3f2aaaab, v8 v_cndmask_b32_e64 v10, 0, 1, vcc_lo v_subrev_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ldexp_f32 v8, v8, v10 v_cvt_f32_i32_e32 v9, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f32_e32 v10, 1.0, v8 v_mul_f32_e32 v14, 0x3f317218, v9 v_cmp_neq_f32_e64 s6, 0x7f800000, |v7| s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v13, -1.0, v10 v_dual_add_f32 v11, -1.0, v8 :: v_dual_sub_f32 v8, v8, v13 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v13, v9, 0x3f317218, -v14 v_fmac_f32_e32 v13, 0xb102e308, v9 v_rcp_f32_e32 v12, v10 s_waitcnt_depctr 0xfff v_mul_f32_e32 v15, v11, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_mul_f32_e32 v9, v10, v15 v_fma_f32 v10, v15, v10, -v9 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v10, v15, v8 v_add_f32_e32 v8, v9, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v16, v11, v8 v_sub_f32_e32 v11, v11, v16 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_sub_f32 v9, v8, v9 :: v_dual_sub_f32 v8, v11, v8 v_sub_f32_e32 v9, v9, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v8, v9, v8 v_add_f32_e32 v8, v16, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v8, v12, v8 v_add_f32_e32 v9, v15, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v10, v9, v15 v_dual_mul_f32 v11, v9, v9 :: v_dual_sub_f32 v8, v8, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fma_f32 v10, v9, v9, -v11 v_ldexp_f32 v12, v9, 1 v_add_f32_e32 v15, v8, v8 v_ldexp_f32 v16, v8, 1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v10, v9, v15 v_add_f32_e32 v15, v11, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmaak_f32 v17, s5, v15, 0x3e91f4c4 v_fmaak_f32 v17, v15, v17, 0x3ecccdef v_sub_f32_e32 v11, v15, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_mul_f32 v18, v9, v15 :: v_dual_mul_f32 v19, v15, v17 v_sub_f32_e32 v10, v10, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v11, v15, v9, -v18 v_fmac_f32_e32 v11, v15, v8 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v8, v15, v17, -v19 v_fmac_f32_e32 v11, v10, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v8, v10, v17 v_add_f32_e32 v10, v19, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v17, v10, v19 
v_sub_f32_e32 v8, v8, v17 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_add_f32 v8, 0x31739010, v8 :: v_dual_add_f32 v9, v18, v11 v_sub_f32_e32 v15, v9, v18 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_add_f32 v18, 0x3f2aaaaa, v10 :: v_dual_sub_f32 v11, v11, v15 v_add_f32_e32 v15, 0xbf2aaaaa, v18 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v10, v10, v15 v_add_f32_e32 v8, v8, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v10, v18, v8 v_sub_f32_e32 v15, v18, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mul_f32 v17, v9, v10 :: v_dual_add_f32 v8, v8, v15 v_fma_f32 v15, v9, v10, -v17 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v15, v9, v8 v_fmac_f32_e32 v15, v11, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v8, v17, v15 v_add_f32_e32 v9, v12, v8 v_sub_f32_e32 v10, v8, v17 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_sub_f32 v10, v15, v10 :: v_dual_sub_f32 v11, v9, v12 v_add_f32_e32 v10, v16, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_sub_f32 v8, v8, v11 :: v_dual_add_f32 v11, v14, v13 v_add_f32_e32 v8, v10, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_f32_e32 v10, v11, v14 v_add_f32_e32 v12, v9, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_sub_f32 v10, v13, v10 :: v_dual_add_f32 v13, v11, v12 v_dual_sub_f32 v9, v12, v9 :: v_dual_sub_f32 v14, v13, v11 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_sub_f32 v8, v8, v9 :: v_dual_sub_f32 v9, v13, v14 v_dual_sub_f32 v9, v11, v9 :: v_dual_sub_f32 v12, v12, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_add_f32 v14, v10, v8 :: v_dual_add_f32 v9, v12, v9 
v_sub_f32_e32 v11, v14, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f32_e32 v9, v14, v9 v_sub_f32_e32 v12, v14, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_sub_f32 v8, v8, v11 :: v_dual_add_f32 v11, v13, v9 v_sub_f32_e32 v10, v10, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v12, v11, v13 v_dual_sub_f32 v9, v9, v12 :: v_dual_add_f32 v8, v8, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v8, v8, v9 v_add_f32_e32 v9, v11, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_f32_e32 v10, v9, v11 v_dual_sub_f32 v8, v8, v10 :: v_dual_add_f32 v11, v9, v9 v_mul_f32_e32 v12, 0, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v9, v9, 2.0, -v11 v_fmac_f32_e32 v12, 2.0, v8 v_cmp_class_f32_e64 vcc_lo, v11, 0x204 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f32_e32 v8, v9, v12 v_add_f32_e32 v9, v11, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v10, v9, v11, vcc_lo v_sub_f32_e32 v9, v9, v11 v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_sub_f32_e32 v8, v8, v9 v_cndmask_b32_e64 v12, 0, 0x37000000, vcc_lo v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v10| v_sub_f32_e32 v13, v10, v12 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v8, 0, v8, vcc_lo v_mul_f32_e32 v14, 0x3fb8aa3b, v13 v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v13 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f32_e32 v8, v12, v8 v_fma_f32 v15, v13, 0x3fb8aa3b, -v14 v_rndne_f32_e32 v16, v14 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_dual_fmac_f32 v15, 0x32a5705f, v13 :: v_dual_sub_f32 v14, v14, v16 v_cvt_i32_f32_e32 v11, v16 v_add_f32_e32 v14, v14, v15 
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_exp_f32_e32 v14, v14 s_waitcnt_depctr 0xfff v_ldexp_f32 v9, v14, v11 v_cndmask_b32_e32 v9, 0, v9, vcc_lo v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v9, 0x7f800000, v9, vcc_lo v_fma_f32 v8, v9, v8, v9 v_cmp_eq_f32_e32 vcc_lo, 0x7f800000, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v8, v8, v9, vcc_lo v_add_co_u32 v1, vcc_lo, v1, 4 v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo v_cndmask_b32_e64 v8, 0x7f800000, |v8|, s6 v_cmp_neq_f32_e32 vcc_lo, 0, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v7, 0, v8, vcc_lo v_add_co_u32 v3, vcc_lo, v3, 4 v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo v_add_f32_e32 v6, v6, v7 s_cbranch_scc0 .LBB1_4 s_branch .LBB1_6 .LBB1_5: v_mov_b32_e32 v6, 0 .LBB1_6: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v6 v_mul_f32_e32 v1, 0x4f800000, v6 v_cndmask_b32_e32 v1, v6, v1, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_sqrt_f32_e32 v2, v1 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v4, 1, v2 v_add_nc_u32_e32 v3, -1, v2 v_fma_f32 v7, -v4, v2, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v6, -v3, v2, v1 v_cmp_ge_f32_e64 s2, 0, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_cndmask_b32_e64 v2, v2, v3, s2 v_cmp_lt_f32_e64 s2, 0, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v2, v2, v4, s2 v_mul_f32_e32 v3, 0x37800000, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v2, v2, v3, vcc_lo v_cmp_class_f32_e64 vcc_lo, v1, 0x260 v_cndmask_b32_e32 v1, v2, v1, vcc_lo .LBB1_7: s_or_b32 exec_lo, exec_lo, s4 s_load_b64 s[0:1], s[0:1], 0x8 s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v5, s3, v[0:1] v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off .LBB1_8: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZL15getDistance_gpuPfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 20 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZL15getDistance_gpuPfS_ii,"axG",@progbits,_ZL15getDistance_gpuPfS_ii,comdat .Lfunc_end1: .size _ZL15getDistance_gpuPfS_ii, .Lfunc_end1-_ZL15getDistance_gpuPfS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 
3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _ZL10argMin_gpuPfPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZL10argMin_gpuPfPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _ZL15getDistance_gpuPfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZL15getDistance_gpuPfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 20 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * Tiled Matrix Multiplication * (MP2, Fall 2014, GPU Programming/Auburn University) * * Compile with -DTILE_WIDTH=16 (for example) to change the tile size. * Compile with -DSEED=12 (for example) to seed the random number generator. */ #include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \ "and multiplies M by N using tiled matrix multiplication.\n" \ "The values of m, k, and n must be >= 1.\n" \ "\n" \ "Usage: mp2 m k n\n" /* Tile size -- define here if not defined using the -D compiler flag */ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* Seed for the random number generator -- define here if not using -D */ #ifndef SEED # define SEED 1 #endif /* Maximum difference allowed between the GPU and CPU result matrices */ #define EPSILON 1e-2 /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ hipError_t err = (e); \ if (err != hipSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif void getDistance(float *h_M, float *h_P, int m, int k, int col, int row){ float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(h_M[row*k+i] - h_M[col*k+i], 2); } expected = sqrt(expected); h_P[row*m+col] = expected; } void argMin(float* h_M, int* result, int m, int k, int row){ float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void argMin_gpu(float* h_M, int* result, int m, int k){ assert(blockDim.x == TILE_WIDTH && 
blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; if(row >= m) return; float minimum = 1e5; int pos = -1; for(int col = 0; col < m; col++){ if (h_M[row*m+col] < minimum){ pos = col; minimum = h_M[row*m+col]; } } result[row] = pos; } __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
Invoke with three command line arguments: m k n */ int main() { printf("%d, %d, %d, %d\n", sizeof(long), sizeof(long long), sizeof(bool), sizeof(char)); /* Get command line arguments; save as m, k, and n */ int m = 100; int k = 128; if (m < 1 || k < 1) { fprintf(stderr, USAGE); fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k); return EXIT_FAILURE; } printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH); /********************************************/ /* M is (m x k), P is (m x m) */ /********************************************/ /* Compute number of bytes needed to stores matrices M and P */ size_t bytesForM = m * k * sizeof(float); size_t bytesForP = m * m * sizeof(float); /* Allocate host memory for matrices */ float *h_M, *h_P; float *result = new float[m*m]; int *index = new int[m]; h_M = (float *)malloc(bytesForM); h_P = (float *)malloc(bytesForP); if (h_M == NULL || h_P == NULL) { fprintf(stderr, "Unable to allocate host memory\n"); return EXIT_FAILURE; } /* Allocate device memory for matrices */ float *d_M, *d_P; int *d_index; CUDA_CHECK(hipMalloc((void **)&d_M, bytesForM)); CUDA_CHECK(hipMalloc((void **)&d_P, bytesForP)); CUDA_CHECK(hipMalloc((void **)&d_index, m*sizeof(int))); /* Fill M (on host) */ std::cout << "Loading matrices...\n"; std::ifstream in1, in2; in1.open("descriptor.txt"); for (int i = 0; i < m*k; ++i) in1 >> h_M[i]; in1.close(); printf("M =\n"); printMatrix(h_M, m, k); /* Copy M to device global memory */ CUDA_CHECK(hipMemcpy(d_M, h_M, bytesForM, hipMemcpyHostToDevice)); /* Launch the CUDA kernel */ dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); printf("matMul called from host\n"); getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k); argMin_gpu<<<dimGrid, dimBlock>>>(d_P, d_index, m, k); CUDA_CHECK(hipDeviceSynchronize()); /* Copy result matrix from device global memory back to host memory */ CUDA_CHECK(hipMemcpy(h_P, d_P, bytesForP, hipMemcpyDeviceToHost)); 
CUDA_CHECK(hipMemcpy(index, d_index, m*sizeof(int), hipMemcpyDeviceToHost)); printf(" product received from host\n"); printf("P =\n"); printMatrix(h_P, m, m); std::ofstream out, out2; out2.open("matrix.txt"); for (int i = 0; i < 100; i++){ for (int j = 0; j < m; j++){ if (h_P[i*m+j] < 0.3) out2 << j+1 << " "; } out2 << std::endl; } out2.close(); /* for (int row = 0; row < m; row++) { for (int col = 0; col < m; col++) { getDistance(h_M, result, m, k, col, row); } } printf("\nExpected matrix:\n"); printMatrix(result, m, m); printf("\n"); for (int i = 0; i < m; i++){ printf("%d ", index[i]); } printf("\n"); */ /* Free device global memory */ CUDA_CHECK(hipFree(d_M)); CUDA_CHECK(hipFree(d_P)); CUDA_CHECK(hipFree(d_index)); /* Free host memory */ free(h_M); free(h_P); free(index); free(result); /* Reset the device (unnecessary if not profiling, but good practice) */ CUDA_CHECK(hipDeviceReset()); printf("Done\n"); system("Pause"); return EXIT_SUCCESS; }
.text .file "mp3.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z11getDistancePfS_iiii # -- Begin function _Z11getDistancePfS_iiii .p2align 4, 0x90 .type _Z11getDistancePfS_iiii,@function _Z11getDistancePfS_iiii: # @_Z11getDistancePfS_iiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %r9d, %ebx movl %r8d, %ebp movl %edx, %r15d movq %rsi, %r14 xorps %xmm1, %xmm1 xorps %xmm0, %xmm0 testl %ecx, %ecx jle .LBB0_3 # %bb.1: # %.lr.ph movl %ebx, %eax imull %ecx, %eax movl %ebp, %edx imull %ecx, %edx movslq %eax, %rsi movslq %edx, %rdx movl %ecx, %eax leaq (%rdi,%rsi,4), %rcx leaq (%rdi,%rdx,4), %rdx xorl %esi, %esi .p2align 4, 0x90 .LBB0_2: # =>This Inner Loop Header: Depth=1 movss (%rcx,%rsi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero subss (%rdx,%rsi,4), %xmm2 cvtss2sd %xmm2, %xmm2 mulsd %xmm2, %xmm2 cvtss2sd %xmm0, %xmm0 addsd %xmm2, %xmm0 cvtsd2ss %xmm0, %xmm0 incq %rsi cmpq %rsi, %rax jne .LBB0_2 .LBB0_3: # %._crit_edge ucomiss %xmm1, %xmm0 jb .LBB0_5 # %bb.4: sqrtss %xmm0, %xmm0 jmp .LBB0_6 .LBB0_5: # %call.sqrt callq sqrtf .LBB0_6: # %._crit_edge.split imull %r15d, %ebx addl %ebp, %ebx movslq %ebx, %rax movss %xmm0, (%r14,%rax,4) addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z11getDistancePfS_iiii, .Lfunc_end0-_Z11getDistancePfS_iiii .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z6argMinPfPiiii .LCPI1_0: .long 0x47c35000 # float 1.0E+5 .text .globl _Z6argMinPfPiiii .p2align 4, 0x90 .type _Z6argMinPfPiiii,@function _Z6argMinPfPiiii: # @_Z6argMinPfPiiii 
.cfi_startproc # %bb.0: testl %edx, %edx jle .LBB1_1 # %bb.3: # %.lr.ph movl %r8d, %eax imull %edx, %eax cltq movl %edx, %ecx leaq (%rdi,%rax,4), %rdx movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero movl $-1, %eax xorl %edi, %edi .p2align 4, 0x90 .LBB1_4: # =>This Inner Loop Header: Depth=1 movss (%rdx,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero ucomiss %xmm1, %xmm0 cmoval %edi, %eax minss %xmm0, %xmm1 incq %rdi movaps %xmm1, %xmm0 cmpq %rdi, %rcx jne .LBB1_4 jmp .LBB1_2 .LBB1_1: movl $-1, %eax .LBB1_2: # %._crit_edge movslq %r8d, %rcx movl %eax, (%rsi,%rcx,4) retq .Lfunc_end1: .size _Z6argMinPfPiiii, .Lfunc_end1-_Z6argMinPfPiiii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI2_0: .quad 0x3fd3333333333333 # double 0.29999999999999999 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $2168, %rsp # imm = 0x878 .cfi_def_cfa_offset 2224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 .cfi_escape 0x2e, 0x00 movl $.L.str, %edi movl $8, %esi movl $8, %edx movl $1, %ecx movl $1, %r8d xorl %eax, %eax callq printf .cfi_escape 0x2e, 0x00 movl $.L.str.3, %edi movl $16, %esi movl $16, %edx xorl %eax, %eax callq printf .cfi_escape 0x2e, 0x00 movl $40000, %edi # imm = 0x9C40 callq _Znam movq %rax, %r12 .cfi_escape 0x2e, 0x00 movl $400, %edi # imm = 0x190 callq _Znam movq %rax, 88(%rsp) # 8-byte Spill .cfi_escape 0x2e, 0x00 movl $51200, %edi # imm = 0xC800 callq malloc movq %rax, %r15 .cfi_escape 0x2e, 0x00 movl $40000, %edi # imm = 0x9C40 callq malloc testq %r15, %r15 je .LBB2_2 # %bb.1: 
movq %rax, %rbx testq %rax, %rax je .LBB2_2 # %bb.3: .cfi_escape 0x2e, 0x00 leaq 24(%rsp), %rdi movl $51200, %esi # imm = 0xC800 callq hipMalloc testl %eax, %eax jne .LBB2_4 # %bb.5: .cfi_escape 0x2e, 0x00 leaq 8(%rsp), %rdi movl $40000, %esi # imm = 0x9C40 callq hipMalloc testl %eax, %eax jne .LBB2_6 # %bb.7: .cfi_escape 0x2e, 0x00 leaq 16(%rsp), %rdi movl $400, %esi # imm = 0x190 callq hipMalloc testl %eax, %eax jne .LBB2_8 # %bb.9: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.10, %esi movl $20, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .cfi_escape 0x2e, 0x00 leaq 1128(%rsp), %rbp movq %rbp, %rdi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev .Ltmp0: .cfi_escape 0x2e, 0x00 leaq 1648(%rsp), %rdi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev .Ltmp1: # %bb.10: leaq 1144(%rsp), %r13 .Ltmp3: .cfi_escape 0x2e, 0x00 movl $.L.str.11, %esi movq %r13, %rdi movl $8, %edx callq _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode .Ltmp4: # %bb.11: # %.noexc movq 1128(%rsp), %rcx addq -24(%rcx), %rbp xorl %esi, %esi testq %rax, %rax jne .LBB2_13 # %bb.12: movl 32(%rbp), %esi orl $4, %esi .LBB2_13: # %.invoke .Ltmp5: .cfi_escape 0x2e, 0x00 movq %rbp, %rdi callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate .Ltmp6: # %bb.14: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode.exit.preheader movq %rbx, 80(%rsp) # 8-byte Spill xorl %r14d, %r14d leaq 1128(%rsp), %rbx .p2align 4, 0x90 .LBB2_15: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode.exit # =>This Inner Loop Header: Depth=1 leaq (%r15,%r14), %rsi .Ltmp7: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZNSi10_M_extractIfEERSiRT_ .Ltmp8: # %bb.16: # %_ZNSirsERf.exit # in Loop: Header=BB2_15 Depth=1 addq $4, %r14 cmpq $51200, %r14 # imm = 0xC800 jne .LBB2_15 # %bb.17: .Ltmp10: .cfi_escape 0x2e, 0x00 movq %r13, %rdi callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv .Ltmp11: movq 
80(%rsp), %rbx # 8-byte Reload # %bb.18: # %.noexc150 testq %rax, %rax jne .LBB2_20 # %bb.19: movq 1128(%rsp), %rax movq -24(%rax), %rax leaq (%rsp,%rax), %rdi addq $1128, %rdi # imm = 0x468 movl 1160(%rsp,%rax), %esi orl $4, %esi .Ltmp12: .cfi_escape 0x2e, 0x00 callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate .Ltmp13: .LBB2_20: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv.exit .cfi_escape 0x2e, 0x00 movl $.Lstr, %edi callq puts@PLT .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $128, %esi callq _ZL11printMatrixPfii movq 24(%rsp), %rdi .Ltmp15: .cfi_escape 0x2e, 0x00 movl $51200, %edx # imm = 0xC800 movq %r15, %rsi movl $1, %ecx callq hipMemcpy .Ltmp16: # %bb.21: testl %eax, %eax jne .LBB2_22 # %bb.29: .cfi_escape 0x2e, 0x00 movl $.Lstr.1, %edi callq puts@PLT .Ltmp20: .cfi_escape 0x2e, 0x00 movabsq $30064771079, %rdi # imm = 0x700000007 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp21: # %bb.30: testl %eax, %eax jne .LBB2_33 # %bb.31: movq 24(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $100, 4(%rsp) movl $128, (%rsp) leaq 72(%rsp), %rax movq %rax, 608(%rsp) leaq 64(%rsp), %rax movq %rax, 616(%rsp) leaq 4(%rsp), %rax movq %rax, 624(%rsp) movq %rsp, %rax movq %rax, 632(%rsp) .Ltmp22: .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp23: # %bb.32: # %.noexc153 movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp24: .cfi_escape 0x2e, 0x10 leaq 608(%rsp), %r9 movl $_ZL15getDistance_gpuPfS_ii, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp25: .LBB2_33: .Ltmp26: .cfi_escape 0x2e, 0x00 movabsq $30064771079, %rdi # imm = 0x700000007 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi 
movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp27: # %bb.34: testl %eax, %eax jne .LBB2_37 # %bb.35: movq 8(%rsp), %rax movq 16(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) movl $100, 4(%rsp) movl $128, (%rsp) leaq 72(%rsp), %rax movq %rax, 608(%rsp) leaq 64(%rsp), %rax movq %rax, 616(%rsp) leaq 4(%rsp), %rax movq %rax, 624(%rsp) movq %rsp, %rax movq %rax, 632(%rsp) .Ltmp28: .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp29: # %bb.36: # %.noexc161 movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .Ltmp30: .cfi_escape 0x2e, 0x10 leaq 608(%rsp), %r9 movl $_ZL10argMin_gpuPfPiii, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp31: .LBB2_37: .Ltmp33: .cfi_escape 0x2e, 0x00 callq hipDeviceSynchronize .Ltmp34: # %bb.38: testl %eax, %eax jne .LBB2_39 # %bb.42: movq 8(%rsp), %rsi .Ltmp38: .cfi_escape 0x2e, 0x00 movl $40000, %edx # imm = 0x9C40 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy .Ltmp39: # %bb.43: testl %eax, %eax jne .LBB2_44 # %bb.47: movq 16(%rsp), %rsi .Ltmp43: .cfi_escape 0x2e, 0x00 movl $400, %edx # imm = 0x190 movq 88(%rsp), %rdi # 8-byte Reload movl $2, %ecx callq hipMemcpy .Ltmp44: # %bb.48: testl %eax, %eax jne .LBB2_49 # %bb.52: .cfi_escape 0x2e, 0x00 movl $.Lstr.2, %edi callq puts@PLT .cfi_escape 0x2e, 0x00 movl $.Lstr.3, %edi callq puts@PLT .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $100, %esi callq _ZL11printMatrixPfii .Ltmp48: .cfi_escape 0x2e, 0x00 leaq 608(%rsp), %rdi callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev .Ltmp49: # %bb.53: .Ltmp51: .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rbp movq %rbp, %rdi callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev .Ltmp52: # %bb.54: leaq 104(%rsp), %rdi .Ltmp54: .cfi_escape 0x2e, 0x00 movl $.L.str.20, %esi movl $16, %edx callq 
_ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode .Ltmp55: # %bb.55: # %.noexc164 movq 96(%rsp), %rcx addq -24(%rcx), %rbp xorl %esi, %esi testq %rax, %rax jne .LBB2_57 # %bb.56: movl 32(%rbp), %esi orl $4, %esi .LBB2_57: # %.invoke195 .Ltmp56: .cfi_escape 0x2e, 0x00 movq %rbp, %rdi callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate .Ltmp57: # %bb.58: # %_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode.exit movq %r12, %r13 xorl %r14d, %r14d leaq 96(%rsp), %rbp movq %rbx, %r12 .p2align 4, 0x90 .LBB2_59: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_60 Depth 2 xorl %ebx, %ebx jmp .LBB2_60 .p2align 4, 0x90 .LBB2_63: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit # in Loop: Header=BB2_60 Depth=2 incq %rbx cmpq $100, %rbx je .LBB2_64 .LBB2_60: # Parent Loop BB2_59 Depth=1 # => This Inner Loop Header: Depth=2 movss (%r12,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movsd .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero ucomisd %xmm0, %xmm1 jbe .LBB2_63 # %bb.61: # in Loop: Header=BB2_60 Depth=2 leal 1(%rbx), %esi .Ltmp58: .cfi_escape 0x2e, 0x00 movq %rbp, %rdi callq _ZNSolsEi .Ltmp59: # %bb.62: # in Loop: Header=BB2_60 Depth=2 .Ltmp60: .cfi_escape 0x2e, 0x00 movl $.L.str.21, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp61: jmp .LBB2_63 .p2align 4, 0x90 .LBB2_64: # in Loop: Header=BB2_59 Depth=1 movq 96(%rsp), %rax movq -24(%rax), %rax movq 336(%rsp,%rax), %rbx testq %rbx, %rbx je .LBB2_65 # %bb.76: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB2_59 Depth=1 cmpb $0, 56(%rbx) je .LBB2_78 # %bb.77: # in Loop: Header=BB2_59 Depth=1 movzbl 67(%rbx), %eax jmp .LBB2_80 .p2align 4, 0x90 .LBB2_78: # in Loop: Header=BB2_59 Depth=1 .Ltmp63: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp64: # %bb.79: # %.noexc173 # in Loop: Header=BB2_59 
Depth=1 movq (%rbx), %rax .Ltmp65: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $10, %esi callq *48(%rax) .Ltmp66: .LBB2_80: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i # in Loop: Header=BB2_59 Depth=1 .Ltmp67: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbp, %rdi callq _ZNSo3putEc .Ltmp68: # %bb.81: # %.noexc175 # in Loop: Header=BB2_59 Depth=1 .Ltmp69: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp70: # %bb.82: # %_ZNSolsEPFRSoS_E.exit # in Loop: Header=BB2_59 Depth=1 incq %r14 addq $400, %r12 # imm = 0x190 cmpq $100, %r14 jne .LBB2_59 # %bb.66: .Ltmp72: .cfi_escape 0x2e, 0x00 leaq 104(%rsp), %rdi callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv .Ltmp73: movq 80(%rsp), %r14 # 8-byte Reload # %bb.67: # %.noexc168 testq %rax, %rax jne .LBB2_69 # %bb.68: movq 96(%rsp), %rax movq -24(%rax), %rax leaq (%rsp,%rax), %rdi addq $96, %rdi movl 128(%rsp,%rax), %esi orl $4, %esi .Ltmp74: .cfi_escape 0x2e, 0x00 callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate .Ltmp75: .LBB2_69: # %_ZNSt14basic_ofstreamIcSt11char_traitsIcEE5closeEv.exit movq 24(%rsp), %rdi .Ltmp76: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp77: # %bb.70: testl %eax, %eax jne .LBB2_71 # %bb.87: movq 8(%rsp), %rdi .Ltmp81: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp82: # %bb.88: testl %eax, %eax jne .LBB2_89 # %bb.92: movq 16(%rsp), %rdi .Ltmp86: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp87: # %bb.93: testl %eax, %eax jne .LBB2_94 # %bb.97: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq free .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq free .cfi_escape 0x2e, 0x00 movq 88(%rsp), %rdi # 8-byte Reload callq free .cfi_escape 0x2e, 0x00 movq %r13, %rdi callq free .Ltmp91: .cfi_escape 0x2e, 0x00 callq hipDeviceReset .Ltmp92: # %bb.98: testl %eax, %eax jne .LBB2_99 # %bb.102: .cfi_escape 0x2e, 0x00 movl $.Lstr.4, %edi callq puts@PLT .Ltmp96: .cfi_escape 0x2e, 0x00 movl $.L.str.27, %edi callq system .Ltmp97: # %bb.103: .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rdi callq 
_ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev .cfi_escape 0x2e, 0x00 leaq 608(%rsp), %rdi callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev .cfi_escape 0x2e, 0x00 leaq 1648(%rsp), %rdi movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev leaq 1904(%rsp), %rdi .cfi_escape 0x2e, 0x00 callq _ZNSt8ios_baseD2Ev .cfi_escape 0x2e, 0x00 leaq 1128(%rsp), %rdi movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev leaq 1384(%rsp), %rdi .cfi_escape 0x2e, 0x00 callq _ZNSt8ios_baseD2Ev xorl %eax, %eax .LBB2_104: addq $2168, %rsp # imm = 0x878 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_2: .cfi_def_cfa_offset 2224 movq stderr(%rip), %rcx .cfi_escape 0x2e, 0x00 movl $.L.str.4, %edi movl $31, %esi movl $1, %edx callq fwrite@PLT movl $1, %eax jmp .LBB2_104 .LBB2_65: .Ltmp99: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp100: # %bb.75: # %.noexc172 .LBB2_4: movq stderr(%rip), %rbx .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.7, %r8d movq %rbx, %rdi movl $178, %ecx jmp .LBB2_24 .LBB2_6: movq stderr(%rip), %rbx .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.8, %r8d movq %rbx, %rdi movl $179, %ecx jmp .LBB2_24 .LBB2_8: movq stderr(%rip), %rbx .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.9, %r8d movq %rbx, %rdi movl $180, %ecx jmp .LBB2_24 .LBB2_22: movq stderr(%rip), %rbx .Ltmp17: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp18: # %bb.23: .cfi_escape 0x2e, 0x00 movl 
$.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.13, %r8d movq %rbx, %rdi movl $197, %ecx jmp .LBB2_24 .LBB2_39: movq stderr(%rip), %rbx .Ltmp35: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp36: # %bb.40: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.15, %r8d movq %rbx, %rdi movl $207, %ecx jmp .LBB2_24 .LBB2_44: movq stderr(%rip), %rbx .Ltmp40: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp41: # %bb.45: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.16, %r8d movq %rbx, %rdi movl $210, %ecx jmp .LBB2_24 .LBB2_49: movq stderr(%rip), %rbx .Ltmp45: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp46: # %bb.50: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.17, %r8d movq %rbx, %rdi movl $211, %ecx jmp .LBB2_24 .LBB2_71: movq stderr(%rip), %rbx .Ltmp78: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp79: # %bb.72: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.22, %r8d movq %rbx, %rdi movl $249, %ecx jmp .LBB2_24 .LBB2_89: movq stderr(%rip), %rbx .Ltmp83: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp84: # %bb.90: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.23, %r8d movq %rbx, %rdi movl $250, %ecx jmp .LBB2_24 .LBB2_94: movq stderr(%rip), %rbx .Ltmp88: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp89: # %bb.95: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.24, %r8d movq %rbx, %rdi movl $251, %ecx jmp .LBB2_24 .LBB2_99: movq stderr(%rip), %rbx .Ltmp93: .cfi_escape 0x2e, 0x00 movl %eax, %edi callq hipGetErrorString .Ltmp94: # %bb.100: .cfi_escape 0x2e, 0x00 movl $.L.str.5, %esi movl $.L.str.6, %edx movl $.L.str.25, %r8d movq %rbx, %rdi movl $261, %ecx # imm = 0x105 .LBB2_24: movq %rax, %r9 xorl %eax, %eax callq fprintf .cfi_escape 0x2e, 0x00 movl $1, %edi 
callq exit .LBB2_74: .Ltmp53: movq %rax, %rbx jmp .LBB2_107 .LBB2_73: .Ltmp50: movq %rax, %rbx jmp .LBB2_108 .LBB2_25: .Ltmp2: movq %rax, %rbx jmp .LBB2_109 .LBB2_101: .Ltmp95: jmp .LBB2_106 .LBB2_96: .Ltmp90: jmp .LBB2_106 .LBB2_91: .Ltmp85: jmp .LBB2_106 .LBB2_86: .Ltmp80: jmp .LBB2_106 .LBB2_51: .Ltmp47: movq %rax, %rbx jmp .LBB2_108 .LBB2_46: .Ltmp42: movq %rax, %rbx jmp .LBB2_108 .LBB2_41: .Ltmp37: movq %rax, %rbx jmp .LBB2_108 .LBB2_28: .Ltmp19: movq %rax, %rbx jmp .LBB2_108 .LBB2_26: .Ltmp14: movq %rax, %rbx jmp .LBB2_108 .LBB2_110: .Ltmp32: movq %rax, %rbx jmp .LBB2_108 .LBB2_105: .Ltmp98: jmp .LBB2_106 .LBB2_85: # %.loopexit.split-lp .Ltmp101: jmp .LBB2_106 .LBB2_27: .Ltmp9: movq %rax, %rbx jmp .LBB2_108 .LBB2_84: # %.loopexit .Ltmp71: jmp .LBB2_106 .LBB2_83: .Ltmp62: .LBB2_106: movq %rax, %rbx .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rdi callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev .LBB2_107: .cfi_escape 0x2e, 0x00 leaq 608(%rsp), %rdi callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev .LBB2_108: .cfi_escape 0x2e, 0x00 leaq 1648(%rsp), %rdi movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev leaq 1904(%rsp), %rdi .cfi_escape 0x2e, 0x00 callq _ZNSt8ios_baseD2Ev .LBB2_109: .cfi_escape 0x2e, 0x00 leaq 1128(%rsp), %rdi movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev leaq 1384(%rsp), %rdi .cfi_escape 0x2e, 0x00 callq _ZNSt8ios_baseD2Ev .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no 
landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 .byte 0 # On action: cleanup .uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp6-.Ltmp3 # Call between .Ltmp3 and .Ltmp6 .uleb128 .Ltmp14-.Lfunc_begin0 # jumps to .Ltmp14 .byte 0 # On action: cleanup .uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp8-.Ltmp7 # Call between .Ltmp7 and .Ltmp8 .uleb128 .Ltmp9-.Lfunc_begin0 # jumps to .Ltmp9 .byte 0 # On action: cleanup .uleb128 .Ltmp10-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp13-.Ltmp10 # Call between .Ltmp10 and .Ltmp13 .uleb128 .Ltmp14-.Lfunc_begin0 # jumps to .Ltmp14 .byte 0 # On action: cleanup .uleb128 .Ltmp15-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp16-.Ltmp15 # Call between .Ltmp15 and .Ltmp16 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp20-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Ltmp31-.Ltmp20 # Call between .Ltmp20 and .Ltmp31 .uleb128 .Ltmp32-.Lfunc_begin0 # jumps to .Ltmp32 .byte 0 # On action: cleanup .uleb128 .Ltmp33-.Lfunc_begin0 # >> Call Site 8 << .uleb128 .Ltmp34-.Ltmp33 # Call between .Ltmp33 and .Ltmp34 .uleb128 .Ltmp37-.Lfunc_begin0 # jumps to .Ltmp37 .byte 0 # On action: cleanup .uleb128 .Ltmp38-.Lfunc_begin0 # >> Call Site 9 << .uleb128 .Ltmp39-.Ltmp38 # Call between .Ltmp38 and .Ltmp39 .uleb128 .Ltmp42-.Lfunc_begin0 # jumps to .Ltmp42 .byte 0 # On action: cleanup .uleb128 .Ltmp43-.Lfunc_begin0 # >> Call Site 10 << .uleb128 .Ltmp44-.Ltmp43 # Call between .Ltmp43 and .Ltmp44 .uleb128 .Ltmp47-.Lfunc_begin0 # jumps to .Ltmp47 .byte 0 # On action: cleanup .uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 11 << .uleb128 .Ltmp49-.Ltmp48 # Call between .Ltmp48 and .Ltmp49 .uleb128 .Ltmp50-.Lfunc_begin0 # jumps to .Ltmp50 .byte 0 # On action: cleanup .uleb128 .Ltmp51-.Lfunc_begin0 # >> Call Site 12 << .uleb128 
.Ltmp52-.Ltmp51 # Call between .Ltmp51 and .Ltmp52 .uleb128 .Ltmp53-.Lfunc_begin0 # jumps to .Ltmp53 .byte 0 # On action: cleanup .uleb128 .Ltmp54-.Lfunc_begin0 # >> Call Site 13 << .uleb128 .Ltmp57-.Ltmp54 # Call between .Ltmp54 and .Ltmp57 .uleb128 .Ltmp98-.Lfunc_begin0 # jumps to .Ltmp98 .byte 0 # On action: cleanup .uleb128 .Ltmp58-.Lfunc_begin0 # >> Call Site 14 << .uleb128 .Ltmp61-.Ltmp58 # Call between .Ltmp58 and .Ltmp61 .uleb128 .Ltmp62-.Lfunc_begin0 # jumps to .Ltmp62 .byte 0 # On action: cleanup .uleb128 .Ltmp63-.Lfunc_begin0 # >> Call Site 15 << .uleb128 .Ltmp70-.Ltmp63 # Call between .Ltmp63 and .Ltmp70 .uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71 .byte 0 # On action: cleanup .uleb128 .Ltmp72-.Lfunc_begin0 # >> Call Site 16 << .uleb128 .Ltmp75-.Ltmp72 # Call between .Ltmp72 and .Ltmp75 .uleb128 .Ltmp98-.Lfunc_begin0 # jumps to .Ltmp98 .byte 0 # On action: cleanup .uleb128 .Ltmp76-.Lfunc_begin0 # >> Call Site 17 << .uleb128 .Ltmp77-.Ltmp76 # Call between .Ltmp76 and .Ltmp77 .uleb128 .Ltmp80-.Lfunc_begin0 # jumps to .Ltmp80 .byte 0 # On action: cleanup .uleb128 .Ltmp81-.Lfunc_begin0 # >> Call Site 18 << .uleb128 .Ltmp82-.Ltmp81 # Call between .Ltmp81 and .Ltmp82 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp86-.Lfunc_begin0 # >> Call Site 19 << .uleb128 .Ltmp87-.Ltmp86 # Call between .Ltmp86 and .Ltmp87 .uleb128 .Ltmp90-.Lfunc_begin0 # jumps to .Ltmp90 .byte 0 # On action: cleanup .uleb128 .Ltmp91-.Lfunc_begin0 # >> Call Site 20 << .uleb128 .Ltmp92-.Ltmp91 # Call between .Ltmp91 and .Ltmp92 .uleb128 .Ltmp95-.Lfunc_begin0 # jumps to .Ltmp95 .byte 0 # On action: cleanup .uleb128 .Ltmp96-.Lfunc_begin0 # >> Call Site 21 << .uleb128 .Ltmp97-.Ltmp96 # Call between .Ltmp96 and .Ltmp97 .uleb128 .Ltmp98-.Lfunc_begin0 # jumps to .Ltmp98 .byte 0 # On action: cleanup .uleb128 .Ltmp99-.Lfunc_begin0 # >> Call Site 22 << .uleb128 .Ltmp100-.Ltmp99 # Call between .Ltmp99 and .Ltmp100 .uleb128 .Ltmp101-.Lfunc_begin0 # 
jumps to .Ltmp101 .byte 0 # On action: cleanup .uleb128 .Ltmp100-.Lfunc_begin0 # >> Call Site 23 << .uleb128 .Ltmp17-.Ltmp100 # Call between .Ltmp100 and .Ltmp17 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 24 << .uleb128 .Ltmp18-.Ltmp17 # Call between .Ltmp17 and .Ltmp18 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp35-.Lfunc_begin0 # >> Call Site 25 << .uleb128 .Ltmp36-.Ltmp35 # Call between .Ltmp35 and .Ltmp36 .uleb128 .Ltmp37-.Lfunc_begin0 # jumps to .Ltmp37 .byte 0 # On action: cleanup .uleb128 .Ltmp40-.Lfunc_begin0 # >> Call Site 26 << .uleb128 .Ltmp41-.Ltmp40 # Call between .Ltmp40 and .Ltmp41 .uleb128 .Ltmp42-.Lfunc_begin0 # jumps to .Ltmp42 .byte 0 # On action: cleanup .uleb128 .Ltmp45-.Lfunc_begin0 # >> Call Site 27 << .uleb128 .Ltmp46-.Ltmp45 # Call between .Ltmp45 and .Ltmp46 .uleb128 .Ltmp47-.Lfunc_begin0 # jumps to .Ltmp47 .byte 0 # On action: cleanup .uleb128 .Ltmp78-.Lfunc_begin0 # >> Call Site 28 << .uleb128 .Ltmp79-.Ltmp78 # Call between .Ltmp78 and .Ltmp79 .uleb128 .Ltmp80-.Lfunc_begin0 # jumps to .Ltmp80 .byte 0 # On action: cleanup .uleb128 .Ltmp83-.Lfunc_begin0 # >> Call Site 29 << .uleb128 .Ltmp84-.Ltmp83 # Call between .Ltmp83 and .Ltmp84 .uleb128 .Ltmp85-.Lfunc_begin0 # jumps to .Ltmp85 .byte 0 # On action: cleanup .uleb128 .Ltmp88-.Lfunc_begin0 # >> Call Site 30 << .uleb128 .Ltmp89-.Ltmp88 # Call between .Ltmp88 and .Ltmp89 .uleb128 .Ltmp90-.Lfunc_begin0 # jumps to .Ltmp90 .byte 0 # On action: cleanup .uleb128 .Ltmp93-.Lfunc_begin0 # >> Call Site 31 << .uleb128 .Ltmp94-.Ltmp93 # Call between .Ltmp93 and .Ltmp94 .uleb128 .Ltmp95-.Lfunc_begin0 # jumps to .Ltmp95 .byte 0 # On action: cleanup .uleb128 .Ltmp94-.Lfunc_begin0 # >> Call Site 32 << .uleb128 .Lfunc_end2-.Ltmp94 # Call between .Ltmp94 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .text .p2align 4, 0x90 # -- 
Begin function _ZL11printMatrixPfii .type _ZL11printMatrixPfii,@function _ZL11printMatrixPfii: # @_ZL11printMatrixPfii .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl %esi, %r14d movq %rdi, %rbx movl $91, %edi callq putchar@PLT movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movss 4(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movss 8(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.L.str.31, %edi xorl %eax, %eax callq printf leal -2(%r14), %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf leal -1(%r14), %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.Lstr.6, %edi callq puts@PLT movl $91, %edi callq putchar@PLT movl %r14d, %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movslq %r14d, %r14 movss 4(%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movss 8(%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.L.str.31, %edi xorl %eax, %eax callq printf leal -2(,%r14,2), %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf leal -1(,%r14,2), %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.Lstr.6, %edi callq puts@PLT movl $2, %edi movq %rbx, %rsi movl %r14d, %edx callq 
_ZL8printRowiPfi movl $.Lstr.5, %edi callq puts@PLT movl $98, %edi movq %rbx, %rsi movl %r14d, %edx callq _ZL8printRowiPfi movl $99, %edi movq %rbx, %rsi movl %r14d, %edx addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp _ZL8printRowiPfi # TAILCALL .Lfunc_end3: .size _ZL11printMatrixPfii, .Lfunc_end3-_ZL11printMatrixPfii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZL30__device_stub__getDistance_gpuPfS_ii .type _ZL30__device_stub__getDistance_gpuPfS_ii,@function _ZL30__device_stub__getDistance_gpuPfS_ii: # @_ZL30__device_stub__getDistance_gpuPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_ZL15getDistance_gpuPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _ZL30__device_stub__getDistance_gpuPfS_ii, .Lfunc_end4-_ZL30__device_stub__getDistance_gpuPfS_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZL25__device_stub__argMin_gpuPfPiii .type _ZL25__device_stub__argMin_gpuPfPiii,@function _ZL25__device_stub__argMin_gpuPfPiii: # @_ZL25__device_stub__argMin_gpuPfPiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 
48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_ZL10argMin_gpuPfPiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _ZL25__device_stub__argMin_gpuPfPiii, .Lfunc_end5-_ZL25__device_stub__argMin_gpuPfPiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZL8printRowiPfi .type _ZL8printRowiPfi,@function _ZL8printRowiPfi: # @_ZL8printRowiPfi .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %edx, %r14d movq %rsi, %rbx movl %edi, %r15d movl $91, %edi callq putchar@PLT imull %r14d, %r15d movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movslq %r15d, %r15 movss 4(%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movss 8(%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.L.str.31, %edi xorl %eax, %eax callq printf leal (%r15,%r14), %eax addl $-2, %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf leal (%r15,%r14), %eax decl %eax movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.30, %edi movb $1, %al callq printf movl $.Lstr.6, %edi popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp puts@PLT # TAILCALL .Lfunc_end6: .size _ZL8printRowiPfi, .Lfunc_end6-_ZL8printRowiPfi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin 
function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB7_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB7_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_ZL15getDistance_gpuPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_ZL10argMin_gpuPfPiii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end7: .size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB8_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB8_2: retq .Lfunc_end8: .size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d, %d, %d, %d\n" .size .L.str, 16 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "using (%d x %d) tiles.\n" .size .L.str.3, 24 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Unable to allocate host memory\n" .size .L.str.4, 32 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "CUDA error: %s, line %d, %s: %s\n" .size .L.str.5, 33 
.type .L.str.6,@object # @.str.6 .L.str.6: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/goldenpartner/Face-recognition/master/mp3.hip" .size .L.str.6, 103 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "hipMalloc((void **)&d_M, bytesForM)" .size .L.str.7, 36 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "hipMalloc((void **)&d_P, bytesForP)" .size .L.str.8, 36 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "hipMalloc((void **)&d_index, m*sizeof(int))" .size .L.str.9, 44 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Loading matrices...\n" .size .L.str.10, 21 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "descriptor.txt" .size .L.str.11, 15 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz "hipMemcpy(d_M, h_M, bytesForM, hipMemcpyHostToDevice)" .size .L.str.13, 54 .type _ZL15getDistance_gpuPfS_ii,@object # @_ZL15getDistance_gpuPfS_ii .section .rodata,"a",@progbits .p2align 3, 0x0 _ZL15getDistance_gpuPfS_ii: .quad _ZL30__device_stub__getDistance_gpuPfS_ii .size _ZL15getDistance_gpuPfS_ii, 8 .type _ZL10argMin_gpuPfPiii,@object # @_ZL10argMin_gpuPfPiii .p2align 3, 0x0 _ZL10argMin_gpuPfPiii: .quad _ZL25__device_stub__argMin_gpuPfPiii .size _ZL10argMin_gpuPfPiii, 8 .type .L.str.15,@object # @.str.15 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.15: .asciz "hipDeviceSynchronize()" .size .L.str.15, 23 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "hipMemcpy(h_P, d_P, bytesForP, hipMemcpyDeviceToHost)" .size .L.str.16, 54 .type .L.str.17,@object # @.str.17 .L.str.17: .asciz "hipMemcpy(index, d_index, m*sizeof(int), hipMemcpyDeviceToHost)" .size .L.str.17, 64 .type .L.str.20,@object # @.str.20 .L.str.20: .asciz "matrix.txt" .size .L.str.20, 11 .type .L.str.21,@object # @.str.21 .L.str.21: .asciz " " .size .L.str.21, 2 .type .L.str.22,@object # @.str.22 .L.str.22: .asciz "hipFree(d_M)" .size .L.str.22, 13 .type .L.str.23,@object # @.str.23 .L.str.23: .asciz "hipFree(d_P)" .size .L.str.23, 13 .type .L.str.24,@object # 
@.str.24 .L.str.24: .asciz "hipFree(d_index)" .size .L.str.24, 17 .type .L.str.25,@object # @.str.25 .L.str.25: .asciz "hipDeviceReset()" .size .L.str.25, 17 .type .L.str.27,@object # @.str.27 .L.str.27: .asciz "Pause" .size .L.str.27, 6 .type .L.str.30,@object # @.str.30 .L.str.30: .asciz " %3.3f" .size .L.str.30, 7 .type .L.str.31,@object # @.str.31 .L.str.31: .asciz " ..." .size .L.str.31, 5 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_ZL15getDistance_gpuPfS_ii" .size .L__unnamed_1, 27 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_ZL10argMin_gpuPfPiii" .size .L__unnamed_2, 22 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "M =" .size .Lstr, 4 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "matMul called from host" .size .Lstr.1, 24 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz " product received from host" .size .Lstr.2, 28 .type .Lstr.3,@object # @str.3 .Lstr.3: .asciz "P =" .size .Lstr.3, 4 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "Done" .size .Lstr.4, 5 .type .Lstr.5,@object # @str.5 .Lstr.5: .asciz " ..." 
.size .Lstr.5, 6 .type .Lstr.6,@object # @str.6 .Lstr.6: .asciz " ]" .size .Lstr.6, 3 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __gxx_personality_v0 .addrsig_sym _ZL30__device_stub__getDistance_gpuPfS_ii .addrsig_sym _ZL25__device_stub__argMin_gpuPfPiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _ZSt4cout .addrsig_sym _ZL15getDistance_gpuPfS_ii .addrsig_sym _ZL10argMin_gpuPfPiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda_runtime.h> #include <stdio.h> __global__ void checkIndex() { printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) " "gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>> (); // reset device before leaving cudaDeviceReset(); return 0; }
code for sm_80 Function : _Z10checkIndexv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */ /* 0x000e220000002500 */ /*0020*/ IADD3 R1, R1, -0x30, RZ ; /* 0xffffffd001017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0e7624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R18, c[0x0][0x10] ; /* 0x0000040000127a02 */ /* 0x000fe20000000f00 */ /*0050*/ S2R R10, SR_TID.Z ; /* 0x00000000000a7919 */ /* 0x000e220000002300 */ /*0060*/ IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0f7624 */ /* 0x000fe200078e00ff */ /*0070*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0080*/ IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x8] ; /* 0x00000200ff107624 */ /* 0x000fe200078e00ff */ /*0090*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e220000002200 */ /*00a0*/ IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff117624 */ /* 0x000fe200078e00ff */ /*00b0*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0002a20000000a00 */ /*00c0*/ IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x14] ; /* 0x00000500ff137624 */ /* 0x000fe200078e00ff */ /*00d0*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */ /* 0x000e220000002100 */ /*00e0*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*00f0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0100*/ MOV R5, c[0x4][0xc] ; /* 0x0100030000057a02 */ /* 0x000fe20000000f00 */ /*0110*/ S2R R13, SR_CTAID.Z ; /* 0x00000000000d7919 */ /* 0x000ee40000002700 */ /*0120*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fc400000e06ff */ /*0130*/ S2R R12, SR_CTAID.Y ; /* 0x00000000000c7919 */ /* 0x000ee80000002600 */ /*0140*/ STL.128 [R1+0x20], R16 ; /* 0x0000201001007387 */ /* 0x0003e80000100c00 
*/ /*0150*/ STL.128 [R1], R8 ; /* 0x0000000801007387 */ /* 0x0013e80000100c00 */ /*0160*/ STL.128 [R1+0x10], R12 ; /* 0x0000100c01007387 */ /* 0x0083e40000100c00 */ /*0170*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x006fe40000000000 */ /*0180*/ MOV R11, 0x1f0 ; /* 0x000001f0000b7802 */ /* 0x000fe40000000f00 */ /*0190*/ MOV R20, 0x170 ; /* 0x0000017000147802 */ /* 0x000fc40000000f00 */ /*01a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*01b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*01c0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*01d0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*01e0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x000fea0003c00000 */ /*01f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0200*/ BRA 0x200; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda_runtime.h> #include <stdio.h> __global__ void checkIndex() { printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) " "gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>> (); // reset device before leaving cudaDeviceReset(); return 0; }
.file "tmpxft_0016af43_00000000-6_checkDimension.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z10checkIndexvv .type _Z29__device_stub__Z10checkIndexvv, @function _Z29__device_stub__Z10checkIndexvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10checkIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z10checkIndexvv, .-_Z29__device_stub__Z10checkIndexvv .globl _Z10checkIndexv .type _Z10checkIndexv, @function _Z10checkIndexv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10checkIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z10checkIndexv, .-_Z10checkIndexv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "grid.x %d grid.y %d grid.z %d\n" .align 8 .LC1: .string "block.x %d block.y %d block.z %d\n" .text .globl main .type main, @function main: .LFB2057: 
.cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, %r8d movl $1, %ecx movl $2, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %r8d movl $1, %ecx movl $3, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, 20(%rsp) movl $1, 24(%rsp) movl $3, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z29__device_stub__Z10checkIndexvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z10checkIndexv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z10checkIndexv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda_runtime.h> #include <stdio.h> __global__ void checkIndex() { printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) " "gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>> (); // reset device before leaving cudaDeviceReset(); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void checkIndex() { printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) " "gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>> (); // reset device before leaving hipDeviceReset(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void checkIndex() { printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) " "gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>> (); // reset device before leaving hipDeviceReset(); return 0; }
.text .file "checkDimension.hip" .globl _Z25__device_stub__checkIndexv # -- Begin function _Z25__device_stub__checkIndexv .p2align 4, 0x90 .type _Z25__device_stub__checkIndexv,@function _Z25__device_stub__checkIndexv: # @_Z25__device_stub__checkIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z25__device_stub__checkIndexv, .Lfunc_end0-_Z25__device_stub__checkIndexv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.L.str, %edi movl $2, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $3, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq printf movabsq $4294967298, %rdi # imm = 0x100000002 leaq 1(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # 
@__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10checkIndexv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z10checkIndexv,@object # @_Z10checkIndexv .section .rodata,"a",@progbits .globl _Z10checkIndexv .p2align 3, 0x0 _Z10checkIndexv: .quad _Z25__device_stub__checkIndexv .size _Z10checkIndexv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "grid.x %d grid.y %d grid.z %d\n" .size .L.str, 31 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "block.x %d block.y %d block.z %d\n" .size .L.str.1, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10checkIndexv" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__checkIndexv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10checkIndexv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0016af43_00000000-6_checkDimension.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z10checkIndexvv .type _Z29__device_stub__Z10checkIndexvv, @function _Z29__device_stub__Z10checkIndexvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10checkIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z10checkIndexvv, .-_Z29__device_stub__Z10checkIndexvv .globl _Z10checkIndexv .type _Z10checkIndexv, @function _Z10checkIndexv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10checkIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z10checkIndexv, .-_Z10checkIndexv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "grid.x %d grid.y %d grid.z %d\n" .align 8 .LC1: .string "block.x %d block.y %d block.z %d\n" .text .globl main .type main, @function main: .LFB2057: 
.cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, %r8d movl $1, %ecx movl $2, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %r8d movl $1, %ecx movl $3, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, 20(%rsp) movl $1, 24(%rsp) movl $3, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z29__device_stub__Z10checkIndexvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z10checkIndexv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z10checkIndexv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "checkDimension.hip" .globl _Z25__device_stub__checkIndexv # -- Begin function _Z25__device_stub__checkIndexv .p2align 4, 0x90 .type _Z25__device_stub__checkIndexv,@function _Z25__device_stub__checkIndexv: # @_Z25__device_stub__checkIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z25__device_stub__checkIndexv, .Lfunc_end0-_Z25__device_stub__checkIndexv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.L.str, %edi movl $2, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $3, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq printf movabsq $4294967298, %rdi # imm = 0x100000002 leaq 1(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # 
@__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10checkIndexv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z10checkIndexv,@object # @_Z10checkIndexv .section .rodata,"a",@progbits .globl _Z10checkIndexv .p2align 3, 0x0 _Z10checkIndexv: .quad _Z25__device_stub__checkIndexv .size _Z10checkIndexv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "grid.x %d grid.y %d grid.z %d\n" .size .L.str, 31 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "block.x %d block.y %d block.z %d\n" .size .L.str.1, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10checkIndexv" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__checkIndexv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10checkIndexv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. 
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( cudaMalloc((void **) &w_d, w_size) ); gpuErrchk( cudaMemcpy(w_d, w, w_size, cudaMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( cudaMalloc((void **) &G_d, G_size) ); gpuErrchk( cudaMemcpy(G_d, G, G_size, cudaMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. 
int *newG_d; gpuErrchk( cudaMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( cudaMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( cudaMemcpy(G, G_d, G_size, cudaMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( cudaFree(w_d) ); gpuErrchk( cudaFree(G_d) ); gpuErrchk( cudaFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. 
size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
.file "tmpxft_000fa854_00000000-6_ising_cuda_v1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z28__device_stub__Z4spinPiPdS_iPiPdS_i .type _Z28__device_stub__Z4spinPiPdS_iPiPdS_i, @function _Z28__device_stub__Z4spinPiPdS_iPiPdS_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4spinPiPdS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z28__device_stub__Z4spinPiPdS_iPiPdS_i, .-_Z28__device_stub__Z4spinPiPdS_iPiPdS_i .globl _Z4spinPiPdS_i .type _Z4spinPiPdS_i, @function _Z4spinPiPdS_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4spinPiPdS_iPiPdS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2085: .size _Z4spinPiPdS_i, .-_Z4spinPiPdS_i .globl _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ .type _Z29__device_stub__Z5checkPiS_iS_PiS_iS_, @function _Z29__device_stub__Z5checkPiS_iS_PiS_iS_: .LFB2086: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z5checkPiS_iS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z29__device_stub__Z5checkPiS_iS_PiS_iS_, .-_Z29__device_stub__Z5checkPiS_iS_PiS_iS_ .globl _Z5checkPiS_iS_ .type _Z5checkPiS_iS_, @function _Z5checkPiS_iS_: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z5checkPiS_iS_, .-_Z5checkPiS_iS_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "/home/ubuntu/Datasets/stackv2/train-structured/LambisElef/ece-auth-ising-cuda/master/ising_cuda_v1.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "GPUassert: %s %s %d\n" .text .globl _Z5isingPiPdii .type _Z5isingPiPdii, @function 
_Z5isingPiPdii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %rdi, %rbp movq %rsi, %r14 movl %edx, %r12d movl %ecx, %ebx movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 32(%rsp), %rdi movl $200, %esi call cudaMalloc@PLT testl %eax, %eax jne .L44 movl $1, %ecx movl $200, %edx movq %r14, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r13d testl %eax, %eax jne .L45 movl %ebx, %r15d imull %ebx, %r15d leal 0(,%r15,4), %r13d movslq %r13d, %r13 leaq 40(%rsp), %rdi movq %r13, %rsi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L46 movl $1, %ecx movq %r13, %rdx movq %rbp, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r14d testl %eax, %eax jne .L47 leaq 48(%rsp), %rdi movq %r13, %rsi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L48 movl $1, 28(%rsp) leaq 56(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L49 leaq 28(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 56(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r14d testl %eax, %eax jne .L26 testl %r12d, %r12d jle .L28 leal 126(%r15), %r14d addl $63, %r15d cmovns %r15d, %r14d sarl $6, %r14d movl $0, 12(%rsp) .L36: movl $64, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl %r14d, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L29: call cudaPeekAtLastError@PLT movl %eax, %r15d testl %eax, %eax jne .L51 call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L52 movl $64, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl 
%r14d, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L32: call cudaPeekAtLastError@PLT movl %eax, %r15d testl %eax, %eax jne .L54 call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L55 leaq 28(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 56(%rsp), %rsi call cudaMemcpy@PLT movl %eax, %r15d testl %eax, %eax jne .L56 cmpl $0, 28(%rsp) jne .L28 movq 40(%rsp), %rax movq 48(%rsp), %rdx movq %rdx, 40(%rsp) movq %rax, 48(%rsp) addl $1, 12(%rsp) movl 12(%rsp), %eax cmpl %eax, %r12d jne .L36 .L28: movl $2, %ecx movq %r13, %rdx movq 40(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L57 movq 32(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L58 movq 40(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L59 movq 48(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L60 movq 88(%rsp), %rax subq %fs:40, %rax jne .L61 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movl %eax, %r13d movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $98, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r13d, %edi call exit@PLT .L45: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $99, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r13d, %edi call exit@PLT .L46: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $104, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), 
%rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L47: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $105, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L48: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $109, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L49: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $114, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L26: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $115, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L50: movl %ebx, %ecx movq 48(%rsp), %rdx movq 32(%rsp), %rsi movq 40(%rsp), %rdi call _Z28__device_stub__Z4spinPiPdS_iPiPdS_i jmp .L29 .L51: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $124, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L52: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $125, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L53: movq 56(%rsp), %rcx movl %ebx, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ jmp .L32 .L54: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $128, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L55: movl %eax, %edi call 
cudaGetErrorString@PLT movq %rax, %rcx movl $129, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L56: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $130, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L57: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $141, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L58: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $144, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L59: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $145, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L60: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $146, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z5isingPiPdii, .-_Z5isingPiPdii .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $208, %rsp .cfi_def_cfa_offset 240 movq %fs:40, %rax movq %rax, 200(%rsp) xorl %eax, %eax movsd .LC2(%rip), %xmm1 movsd %xmm1, (%rsp) movsd .LC3(%rip), %xmm0 movsd %xmm0, 8(%rsp) movsd .LC4(%rip), %xmm2 movsd %xmm2, 16(%rsp) movsd %xmm0, 24(%rsp) movsd %xmm1, 32(%rsp) movsd %xmm0, 40(%rsp) movsd .LC5(%rip), 
%xmm3 movsd %xmm3, 48(%rsp) movsd .LC6(%rip), %xmm4 movsd %xmm4, 56(%rsp) movsd %xmm3, 64(%rsp) movsd %xmm0, 72(%rsp) movsd %xmm2, 80(%rsp) movsd %xmm4, 88(%rsp) movq $0x000000000, 96(%rsp) movsd %xmm4, 104(%rsp) movsd %xmm2, 112(%rsp) movsd %xmm0, 120(%rsp) movsd %xmm3, 128(%rsp) movsd %xmm4, 136(%rsp) movsd %xmm3, 144(%rsp) movsd %xmm0, 152(%rsp) movsd %xmm1, 160(%rsp) movsd %xmm0, 168(%rsp) movsd %xmm2, 176(%rsp) movsd %xmm0, 184(%rsp) movsd %xmm1, 192(%rsp) movl $1048576, %edi call malloc@PLT movq %rax, %r12 movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movq %r12, %rbx leaq 1048576(%r12), %rbp .L63: call rand@PLT movl %eax, %edx shrl $31, %edx addl %edx, %eax andl $1, %eax subl %edx, %eax leal -1(%rax,%rax), %eax movl %eax, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L63 movq %rsp, %rsi movl $512, %ecx movl $10, %edx movq %r12, %rdi call _Z5isingPiPdii movq 200(%rsp), %rax subq %fs:40, %rax jne .L67 movl $0, %eax addq $208, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L67: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z5checkPiS_iS_" .LC9: .string "_Z4spinPiPdS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2089: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z5checkPiS_iS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z4spinPiPdS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long -755914244 .long 1064329805 .align 8 .LC3: .long -755914244 .long 1066426957 .align 8 .LC4: .long 1992864825 .long 1067098046 .align 8 .LC5: .long 1443109011 .long 1068641550 .align 8 .LC6: .long 1168231105 .long 1069413302 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. 
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( cudaMalloc((void **) &w_d, w_size) ); gpuErrchk( cudaMemcpy(w_d, w, w_size, cudaMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( cudaMalloc((void **) &G_d, G_size) ); gpuErrchk( cudaMemcpy(G_d, G, G_size, cudaMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. 
int *newG_d; gpuErrchk( cudaMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( cudaMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( cudaMemcpy(G, G_d, G_size, cudaMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( cudaFree(w_d) ); gpuErrchk( cudaFree(G_d) ); gpuErrchk( cudaFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. 
size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
/* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. 
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( hipMalloc((void **) &w_d, w_size) ); gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( hipMalloc((void **) &G_d, G_size) ); gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. 
int *newG_d; gpuErrchk( hipMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( hipFree(w_d) ); gpuErrchk( hipFree(G_d) ); gpuErrchk( hipFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. 
size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/*
 * ising_cuda_v1.cu
 *
 * Created on: Dec 26, 2019
 * Author: Charalampos Eleftheriadis
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 512
#define threadsNum 64

// Checks a HIP API return code and aborts with file/line context on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
    if (code != hipSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}

// Kernel: computes one update step of the n-by-n spin lattice.
// Each thread handles one lattice site (flat index). For every site the
// 5x5 weight stencil w (row-major, 25 entries, center w[12] unused) is
// applied over the toroidally-wrapped neighborhood of G, and the sign of
// the weighted sum decides the new spin written to newG.
// Launch: 1-D grid, any block size; out-of-range threads exit early.
__global__ void spin(int *G, double *w, int *newG, int n) {
    // Flat Atomic Spin index for this thread.
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Guards against the partial last block.
    if (index >= n*n) return;

    double weightSum = 0;

    // Row offset uses (index/n +- d + n) % n; column offset uses
    // (index +- d + n) % n, which equals (col +- d + n) % n because
    // index === col (mod n). Both wrap around the torus.
    weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n];
    weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n];
    weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n];
    weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n];
    weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n];
    weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n];
    weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n];
    weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n];
    weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n];
    weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n];
    weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n];
    weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n];
    // w[12] is not contributing anything. It's the current Atomic Spin.
    weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n];
    weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n];
    weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n];
    weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n];
    weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n];
    weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n];
    weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n];
    weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n];
    weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n];
    weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n];
    weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n];
    weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n];

    // Sign decision with a small dead band around zero: exact ties (within
    // +-0.0001, absorbing floating-point noise) keep the old spin.
    if (weightSum > 0.0001)
        newG[index] = 1;
    else if (weightSum < -0.0001)
        newG[index] = -1;
    else
        newG[index] = G[index];
}

// Kernel: clears *same to 0 if any site of newG differs from G.
// *same must be preset to 1 by the host before launch; the kernel only
// ever writes 0, so concurrent writes from many threads are benign.
__global__ void check(int *G, int *newG, int n, int *same) {
    // Flat Atomic Spin index for this thread.
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Guards against the partial last block.
    if (index >= n*n) return;

    if (G[index] != newG[index])
        *same = 0;
}

// Runs up to k update steps of the Ising model on the n-by-n spin lattice G
// (row-major, values -1/+1) using the 5x5 weight stencil w (25 doubles).
// Stops early once a step leaves the lattice unchanged. G is updated in
// place on the host with the final state.
void ising(int *G, double *w, int k, int n) {
    // Creates and transfers the Weight Matrix to GPU memory.
    double *w_d;
    int w_size = 25*sizeof(double);
    gpuErrchk( hipMalloc((void **) &w_d, w_size) );
    gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) );

    // Creates and transfers the Atomic Spins Matrix to GPU memory.
    int *G_d;
    int G_size = n*n*sizeof(int);
    gpuErrchk( hipMalloc((void **) &G_d, G_size) );
    gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) );

    // Creates the new Atomic Spins Matrix in GPU memory (written by spin()).
    int *newG_d;
    gpuErrchk( hipMalloc((void **) &newG_d, G_size) );

    // Convergence flag: 1 while the new lattice equals the old one.
    int same;
    int *same_d;
    gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) );

    // Temporary variable for Atomic Spins Matrices' pointer swapping.
    int *temp_d;

    for (int i=0; i<k; i++) {
        // BUGFIX: the flag must be re-armed to 1 every iteration. check()
        // only ever clears it, so without this reset a single changed
        // iteration would permanently disable early termination, even if
        // the lattice converged later.
        same = 1;
        gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) );

        // One lattice update, (n*n+threadsNum-1)/threadsNum blocks of threadsNum threads.
        spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n);
        gpuErrchk( hipPeekAtLastError() );
        gpuErrchk( hipDeviceSynchronize() );

        // Compare old vs. new lattice on the device.
        check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d);
        gpuErrchk( hipPeekAtLastError() );
        gpuErrchk( hipDeviceSynchronize() );

        // Blocking copy of the flag; stops once the lattice is stable.
        gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) );
        if (same)
            break;

        // Atomic Spin Matrices' pointer swapping: newG becomes current G.
        temp_d = G_d;
        G_d = newG_d;
        newG_d = temp_d;
    }

    // Copies the final lattice from GPU to CPU memory.
    gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) );

    // Cleanup.
    gpuErrchk( hipFree(w_d) );
    gpuErrchk( hipFree(G_d) );
    gpuErrchk( hipFree(newG_d) );
    gpuErrchk( hipFree(same_d) );
}

int main() {
    // Weight Matrix (5x5 stencil, center weight intentionally zero).
    double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004,
                   0.016, 0.071, 0.117, 0.071, 0.016,
                   0.026, 0.117, 0.000, 0.117, 0.026,
                   0.016, 0.071, 0.117, 0.071, 0.016,
                   0.004, 0.016, 0.026, 0.016, 0.004 };

    // Number of dimensions for the square Atomic Spins Matrix.
    int n = N;

    // Allocates memory for the Atomic Spins Matrix.
    int *G = (int *)malloc(n*n * sizeof(int));

    // Randomizes seed.
    srand(time(NULL));

    // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution.
    for (int i=0; i<n*n; i++)
        G[i] = ((rand() % 2) * 2) - 1;

/*
    // Reads configuration file.
    size_t readStatus;
    FILE *conf_init = fopen("conf-init.bin","rb");
    int initG[n*n];
    readStatus = fread(&initG, sizeof(int), n*n, conf_init);
    if (readStatus != n*n)
        printf("Could not read conf-init.bin file.\n");
    fclose(conf_init);

    // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file.
    for (int i=0; i<n*n; i++)
        G[i] = initG[i];
*/

    ising(G, w, 10, n);

/*
    // Reads configuration file for state after one iteration.
    size_t readStatus1;
    FILE *conf_1 = fopen("conf-1.bin","rb");
    int G1[n*n];
    readStatus1 = fread(&G1, sizeof(int), n*n, conf_1);
    if (readStatus1 != n*n)
        printf("Could not read conf-1.bin file.\n");
    fclose(conf_1);

    // Checks for errors.
    int errorsNum = 0;
    for (int i=0; i<n; i++)
        for (int j=0; j<n; j++)
            if (G[i*n+j] != G1[i*n+j])
                errorsNum++;
    if (errorsNum == 0)
        printf("Correct Results!\n");
    else
        printf("Wrong Results. Number of errors: %d\n", errorsNum);

    // Checks the results.
    for (int i=0; i<n; i++) {
        for (int j=0; j<n; j++) {
            if (G[i*n+j] == G1[i*n+j])
                printf("=");
            else
                printf("!");
        }
        printf("\n");
    }
    printf("\n\n");
*/

    // FIX: release the host lattice (was leaked).
    free(G);

    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4spinPiPdS_i .globl _Z4spinPiPdS_i .p2align 8 .type _Z4spinPiPdS_i,@function _Z4spinPiPdS_i: s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1] s_mul_i32 s3, s2, s2 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_cmp_gt_i32_e32 vcc_lo, s3, v1 s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_8 s_ashr_i32 s4, s2, 31 v_ashrrev_i32_e32 v3, 31, v1 s_add_i32 s3, s2, s4 s_load_b128 s[12:15], s[0:1], 0x0 s_xor_b32 s3, s3, s4 v_add_nc_u32_e32 v7, s2, v1 v_cvt_f32_u32_e32 v0, s3 s_sub_i32 s5, 0, s3 v_add_nc_u32_e32 v4, v1, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v10, -1, v7 v_rcp_iflag_f32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_xor_b32_e32 v4, v4, v3 v_xor_b32_e32 v3, s4, v3 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_cvt_u32_f32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mul_lo_u32 v2, s5, v0 s_waitcnt lgkmcnt(0) s_load_b256 s[4:11], s[14:15], 0x0 v_mul_hi_u32 v2, v0, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v0, v2 v_mul_hi_u32 v2, v4, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v2, s3 v_sub_nc_u32_e32 v4, v4, v5 v_add_nc_u32_e32 v5, 1, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v6, s3, v4 v_cmp_le_u32_e32 vcc_lo, s3, v4 v_cndmask_b32_e32 v2, v2, v5, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v4, v4, v6, vcc_lo v_add_nc_u32_e32 v5, 1, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s3, v4 v_dual_cndmask_b32 v2, v2, v5 :: v_dual_add_nc_u32 v5, -2, v7 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_xor_b32_e32 v2, v2, v3 v_ashrrev_i32_e32 v6, 31, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v2, v2, v3 v_add_nc_u32_e32 v5, v5, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add3_u32 v3, s2, -2, v2 v_xor_b32_e32 v5, v5, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v4, 31, v3 v_mul_hi_u32 v9, v5, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v3, v3, v4 v_mul_lo_u32 v9, v9, s3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_xor_b32_e32 v3, v3, v4 v_mul_hi_u32 v8, v3, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v5, v5, v9 v_subrev_nc_u32_e32 v9, s3, v5 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v8, v8, s3 v_sub_nc_u32_e32 v3, v3, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v8, s3, v3 v_cmp_le_u32_e32 vcc_lo, s3, v3 v_cndmask_b32_e32 v3, v3, v8, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v5 v_ashrrev_i32_e32 v8, 31, v10 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v11, s3, v3 v_cndmask_b32_e32 v5, v5, v9, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v3 v_dual_cndmask_b32 v3, v3, v11 :: v_dual_add_nc_u32 v10, v10, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_xor_b32_e32 v9, v10, v8 v_subrev_nc_u32_e32 v10, s3, v5 v_cmp_le_u32_e32 vcc_lo, s3, v5 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_xor_b32_e32 v3, v3, v4 v_mul_hi_u32 v11, v9, v0 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v5, v5, v10, vcc_lo v_sub_nc_u32_e32 v3, v3, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_xor_b32_e32 v4, v5, v6 v_mul_lo_u32 v5, 
v11, s3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_lo_u32 v10, v3, s2 v_sub_nc_u32_e32 v4, v4, v6 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v9, v5 v_add_nc_u32_e32 v5, v10, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v9, s3, v3 v_ashrrev_i32_e32 v6, 31, v5 v_cmp_le_u32_e32 vcc_lo, s3, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_lshlrev_b64 v[5:6], 2, v[5:6] v_cndmask_b32_e32 v3, v3, v9, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s12, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s13, v6, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v9, s3, v3 v_cmp_le_u32_e32 vcc_lo, s3, v3 global_load_b32 v11, v[5:6], off v_cndmask_b32_e32 v3, v3, v9, vcc_lo v_xor_b32_e32 v3, v3, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v3, v8 v_add_nc_u32_e32 v8, 1, v7 v_add_nc_u32_e32 v5, v3, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v9, 31, v8 v_ashrrev_i32_e32 v6, 31, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v8, v8, v9 v_lshlrev_b64 v[5:6], 2, v[5:6] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_xor_b32_e32 v8, v8, v9 v_add_co_u32 v5, vcc_lo, s12, v5 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s13, v6, vcc_lo v_mul_hi_u32 v12, v8, v0 global_load_b32 v13, v[5:6], off v_mul_lo_u32 v5, v2, s2 v_mul_lo_u32 v6, v12, s3 v_add_nc_u32_e32 v12, 2, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v5, v1, v5 v_ashrrev_i32_e32 v14, 31, v12 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v8, v8, v6 v_add_nc_u32_e32 v6, v5, 
v10 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v12, v12, v14 v_subrev_nc_u32_e32 v15, s3, v8 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_ashrrev_i32_e32 v7, 31, v6 v_cmp_le_u32_e32 vcc_lo, s3, v8 v_xor_b32_e32 v12, v12, v14 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_lshlrev_b64 v[6:7], 2, v[6:7] v_cndmask_b32_e32 v8, v8, v15, vcc_lo v_mul_hi_u32 v16, v12, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v6, vcc_lo, s12, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v15, s3, v8 v_cmp_le_u32_e32 vcc_lo, s3, v8 v_cndmask_b32_e32 v8, v8, v15, vcc_lo global_load_b32 v15, v[6:7], off v_mul_lo_u32 v7, v16, s3 v_xor_b32_e32 v6, v8, v9 v_add3_u32 v8, s2, -1, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_sub_nc_u32_e32 v6, v6, v9 v_sub_nc_u32_e32 v9, v12, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v12, 31, v8 v_add_nc_u32_e32 v7, v6, v10 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v16, s3, v9 v_add_nc_u32_e32 v17, v8, v12 v_cmp_le_u32_e32 vcc_lo, s3, v9 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_ashrrev_i32_e32 v8, 31, v7 v_cndmask_b32_e32 v9, v9, v16, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_xor_b32_e32 v16, v17, v12 v_lshlrev_b64 v[7:8], 2, v[7:8] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v17, s3, v9 v_mul_hi_u32 v18, v16, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v7, vcc_lo, s12, v7 v_add_co_ci_u32_e32 v8, vcc_lo, s13, v8, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v9 v_cndmask_b32_e32 v9, v9, v17, vcc_lo global_load_b32 v17, v[7:8], off v_mul_lo_u32 v7, 
v18, s3 v_xor_b32_e32 v8, v9, v14 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v9, v16, v7 v_sub_nc_u32_e32 v7, v8, v14 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v14, s3, v9 v_add_nc_u32_e32 v8, v7, v10 v_cmp_le_u32_e32 vcc_lo, s3, v9 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v10, v9, v14, vcc_lo v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v14, s3, v10 v_lshlrev_b64 v[8:9], 2, v[8:9] v_cmp_le_u32_e32 vcc_lo, s3, v10 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v10, v10, v14, vcc_lo v_add_co_u32 v8, vcc_lo, s12, v8 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo v_xor_b32_e32 v10, v10, v12 global_load_b32 v16, v[8:9], off v_sub_nc_u32_e32 v8, v10, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v10, v8, s2 v_add_nc_u32_e32 v8, v10, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v18, v[8:9], off v_add_nc_u32_e32 v8, v10, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v19, v[8:9], off v_add_nc_u32_e32 v8, v10, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v20, v[8:9], off v_add_nc_u32_e32 v9, s2, v2 v_add_nc_u32_e32 v8, v10, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v12, 31, v9 v_add_nc_u32_e32 v14, v9, v12 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v9, 31, v8 v_xor_b32_e32 v14, v14, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[8:9], 2, v[8:9] v_mul_hi_u32 v21, v14, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v22, v[8:9], off v_mul_lo_u32 v8, v21, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v9, v14, v8 v_add_nc_u32_e32 v8, v10, v7 v_subrev_nc_u32_e32 v14, s3, v9 v_cmp_le_u32_e32 vcc_lo, s3, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_cndmask_b32_e32 v14, v9, v14, vcc_lo v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v21, s3, v14 v_lshlrev_b64 v[8:9], 2, v[8:9] v_cmp_le_u32_e32 vcc_lo, s3, v14 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v14, v14, v21, vcc_lo v_add_co_u32 v8, vcc_lo, s12, v8 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo v_xor_b32_e32 v14, v14, v12 global_load_b32 v21, v[8:9], off s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[10:11], v11 v_sub_nc_u32_e32 v8, v14, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v23, v8, s2 v_add_nc_u32_e32 v8, v23, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v24, v[8:9], off s_waitcnt lgkmcnt(0) v_fma_f64 v[10:11], s[4:5], v[10:11], 0 v_add_nc_u32_e32 v8, v23, v3 s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[12:13], v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v25, v[8:9], off v_add3_u32 v9, s2, 1, v2 v_add_nc_u32_e32 v8, v23, v6 v_add3_u32 v2, s2, 2, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v26, 31, v9 v_fma_f64 v[10:11], s[6:7], v[12:13], v[10:11] s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[14:15], v15 v_fma_f64 v[10:11], s[8:9], v[14:15], v[10:11] v_add_nc_u32_e32 v14, v9, v26 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[12:13], v17 v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_xor_b32_e32 v14, v14, v26 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v15, v14, v0 v_add_co_u32 v8, vcc_lo, s12, v8 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v17, v[8:9], off v_mul_lo_u32 v8, v15, s3 v_sub_nc_u32_e32 v9, v14, v8 v_add_nc_u32_e32 v8, v23, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v27, s3, v9 v_cmp_le_u32_e32 vcc_lo, s3, v9 v_fma_f64 v[10:11], s[10:11], v[12:13], v[10:11] v_cndmask_b32_e32 v12, v9, v27, vcc_lo v_ashrrev_i32_e32 v9, 31, v8 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[14:15], v16 s_load_b256 s[4:11], s[14:15], 0x20 v_subrev_nc_u32_e32 v13, s3, v12 v_lshlrev_b64 v[8:9], 2, v[8:9] v_cmp_le_u32_e32 vcc_lo, s3, v12 s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v12, v12, v13, vcc_lo v_add_co_u32 v8, vcc_lo, s12, v8 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo v_xor_b32_e32 v12, v12, v26 global_load_b32 v16, v[8:9], off v_sub_nc_u32_e32 v8, v12, v26 v_ashrrev_i32_e32 v26, 31, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v23, v8, s2 v_add_nc_u32_e32 v2, v2, v26 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_xor_b32_e32 v2, v2, v26 v_add_nc_u32_e32 v8, v23, v4 s_delay_alu instid0(VALU_DEP_2) v_mul_hi_u32 v0, v2, v0 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[12:13], v18 s_waitcnt lgkmcnt(0) v_fma_f64 v[10:11], s[4:5], v[14:15], v[10:11] v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v0, v0, s3 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_sub_nc_u32_e32 v0, v2, v0 global_load_b32 v18, v[8:9], off v_add_nc_u32_e32 v8, v23, v3 v_subrev_nc_u32_e32 v2, s3, v0 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[14:15], v19 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_co_u32 v8, vcc_lo, s12, v8 v_fma_f64 v[10:11], s[6:7], v[12:13], v[10:11] v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v19, v[8:9], off v_add_nc_u32_e32 v8, v23, v5 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[12:13], v20 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 
v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo global_load_b32 v20, v[8:9], off v_add_nc_u32_e32 v8, v23, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v9, 31, v8 v_fma_f64 v[10:11], s[8:9], v[14:15], v[10:11] s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[14:15], v22 v_lshlrev_b64 v[8:9], 2, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo v_cmp_le_u32_e32 vcc_lo, s3, v0 global_load_b32 v22, v[8:9], off v_add_nc_u32_e32 v8, v23, v7 v_cndmask_b32_e32 v0, v0, v2, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v9, 31, v8 v_subrev_nc_u32_e32 v2, s3, v0 v_cmp_le_u32_e32 vcc_lo, s3, v0 s_mov_b32 s3, 0x3f1a36e2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[8:9], 2, v[8:9] v_cndmask_b32_e32 v0, v0, v2, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_xor_b32_e32 v0, v0, v26 v_fma_f64 v[10:11], s[10:11], v[12:13], v[10:11] s_load_b256 s[4:11], s[14:15], 0x40 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[12:13], v21 global_load_b32 v21, v[8:9], off v_sub_nc_u32_e32 v0, v0, v26 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mul_lo_u32 v0, v0, s2 s_mov_b32 s2, 0xeb1c432d v_add_nc_u32_e32 v8, v0, v4 v_add_nc_u32_e32 v2, v0, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v9, 31, v8 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[8:9], 2, v[8:9] v_lshlrev_b64 v[2:3], 2, v[2:3] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v8, vcc_lo, s12, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s13, v9, vcc_lo s_waitcnt lgkmcnt(0) v_fma_f64 v[10:11], 
s[4:5], v[14:15], v[10:11] s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) v_add_co_u32 v2, vcc_lo, s12, v2 s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[14:15], v24 v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo v_fma_f64 v[10:11], s[6:7], v[12:13], v[10:11] s_clause 0x1 global_load_b32 v12, v[8:9], off global_load_b32 v13, v[2:3], off s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[8:9], v25 v_add_nc_u32_e32 v2, v0, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s12, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo v_fma_f64 v[10:11], s[8:9], v[14:15], v[10:11] global_load_b32 v14, v[2:3], off v_add_nc_u32_e32 v2, v0, v6 v_add_nc_u32_e32 v6, v0, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_ashrrev_i32_e32 v7, 31, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[2:3] v_lshlrev_b64 v[6:7], 2, v[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v2, vcc_lo, s12, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v6, vcc_lo, s12, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo global_load_b32 v0, v[6:7], off s_waitcnt vmcnt(10) v_cvt_f64_i32_e32 v[4:5], v17 v_fma_f64 v[8:9], s[10:11], v[8:9], v[10:11] s_load_b256 s[4:11], s[14:15], 0x68 s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[10:11], v16 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) v_fma_f64 v[4:5], s[4:5], v[4:5], v[8:9] global_load_b32 v8, v[2:3], off s_waitcnt vmcnt(9) v_cvt_f64_i32_e32 v[2:3], v18 v_fma_f64 v[4:5], s[6:7], v[10:11], v[4:5] s_waitcnt vmcnt(8) v_cvt_f64_i32_e32 v[6:7], v19 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_fma_f64 v[2:3], 
s[8:9], v[2:3], v[4:5] s_waitcnt vmcnt(7) v_cvt_f64_i32_e32 v[4:5], v20 v_fma_f64 v[2:3], s[10:11], v[6:7], v[2:3] s_load_b256 s[4:11], s[14:15], 0x88 s_waitcnt vmcnt(6) v_cvt_f64_i32_e32 v[6:7], v22 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_fma_f64 v[2:3], s[4:5], v[4:5], v[2:3] s_waitcnt vmcnt(5) v_cvt_f64_i32_e32 v[4:5], v21 v_fma_f64 v[2:3], s[6:7], v[6:7], v[2:3] s_waitcnt vmcnt(4) v_cvt_f64_i32_e32 v[6:7], v12 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_fma_f64 v[2:3], s[8:9], v[4:5], v[2:3] s_waitcnt vmcnt(3) v_cvt_f64_i32_e32 v[4:5], v13 v_fma_f64 v[2:3], s[10:11], v[6:7], v[2:3] s_load_b256 s[4:11], s[14:15], 0xa8 s_waitcnt vmcnt(2) v_cvt_f64_i32_e32 v[6:7], v14 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_fma_f64 v[2:3], s[4:5], v[4:5], v[2:3] s_waitcnt vmcnt(0) v_cvt_f64_i32_e32 v[4:5], v8 v_fma_f64 v[2:3], s[6:7], v[6:7], v[2:3] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fma_f64 v[2:3], s[8:9], v[4:5], v[2:3] v_cvt_f64_i32_e32 v[4:5], v0 v_fma_f64 v[3:4], s[10:11], v[4:5], v[2:3] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_nlt_f64_e32 vcc_lo, s[2:3], v[3:4] s_and_saveexec_b32 s3, vcc_lo s_xor_b32 s4, exec_lo, s3 s_cbranch_execz .LBB0_5 s_mov_b32 s3, 0xbf1a36e2 v_mov_b32_e32 v0, -1 v_cmp_ngt_f64_e32 vcc_lo, s[2:3], v[3:4] s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_4 v_lshlrev_b64 v[3:4], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s12, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s13, v4, vcc_lo global_load_b32 v0, v[3:4], off .LBB0_4: s_or_b32 exec_lo, exec_lo, s2 .LBB0_5: s_and_not1_saveexec_b32 s2, s4 s_cbranch_execz .LBB0_7 s_waitcnt vmcnt(0) v_mov_b32_e32 v0, 1 .LBB0_7: s_or_b32 exec_lo, exec_lo, s2 s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[1:2], 2, 
v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[1:2], v0, off .LBB0_8: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4spinPiPdS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 28 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4spinPiPdS_i, .Lfunc_end0-_Z4spinPiPdS_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z5checkPiS_iS_ .globl _Z5checkPiS_iS_ .p2align 8 .type _Z5checkPiS_iS_,@function _Z5checkPiS_iS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_mul_i32 s3, s3, s3 v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo 
s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB1_3 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo global_load_b32 v2, v[2:3], off global_load_b32 v0, v[0:1], off s_waitcnt vmcnt(0) v_cmp_ne_u32_e32 vcc_lo, v2, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB1_3 s_load_b64 s[0:1], s[0:1], 0x18 v_mov_b32_e32 v0, 0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v0, s[0:1] .LBB1_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5checkPiS_iS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 
.amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z5checkPiS_iS_, .Lfunc_end1-_Z5checkPiS_iS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4spinPiPdS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4spinPiPdS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 28 .vgpr_spill_count: 0 
.wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5checkPiS_iS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z5checkPiS_iS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. 
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( hipMalloc((void **) &w_d, w_size) ); gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( hipMalloc((void **) &G_d, G_size) ); gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. 
int *newG_d; gpuErrchk( hipMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( hipFree(w_d) ); gpuErrchk( hipFree(G_d) ); gpuErrchk( hipFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. 
size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
.text .file "ising_cuda_v1.hip" .globl _Z19__device_stub__spinPiPdS_i # -- Begin function _Z19__device_stub__spinPiPdS_i .p2align 4, 0x90 .type _Z19__device_stub__spinPiPdS_i,@function _Z19__device_stub__spinPiPdS_i: # @_Z19__device_stub__spinPiPdS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4spinPiPdS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z19__device_stub__spinPiPdS_i, .Lfunc_end0-_Z19__device_stub__spinPiPdS_i .cfi_endproc # -- End function .globl _Z20__device_stub__checkPiS_iS_ # -- Begin function _Z20__device_stub__checkPiS_iS_ .p2align 4, 0x90 .type _Z20__device_stub__checkPiS_iS_,@function _Z20__device_stub__checkPiS_iS_: # @_Z20__device_stub__checkPiS_iS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z5checkPiS_iS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size 
_Z20__device_stub__checkPiS_iS_, .Lfunc_end1-_Z20__device_stub__checkPiS_iS_ .cfi_endproc # -- End function .globl _Z5isingPiPdii # -- Begin function _Z5isingPiPdii .p2align 4, 0x90 .type _Z5isingPiPdii,@function _Z5isingPiPdii: # @_Z5isingPiPdii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movl %edx, %r14d movq %rsi, %rbx movq %rdi, %r15 leaq 32(%rsp), %rdi movl $200, %esi callq hipMalloc testl %eax, %eax jne .LBB2_1 # %bb.3: # %_Z9gpuAssert10hipError_tPKcib.exit movq 32(%rsp), %rdi movl $200, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_4 # %bb.5: # %_Z9gpuAssert10hipError_tPKcib.exit29 movl %ebp, %ebx imull %ebx, %ebx leal (,%rbx,4), %eax movslq %eax, %r12 movq %rsp, %rdi movq %r12, %rsi callq hipMalloc testl %eax, %eax jne .LBB2_6 # %bb.7: # %_Z9gpuAssert10hipError_tPKcib.exit31 movq (%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_8 # %bb.9: # %_Z9gpuAssert10hipError_tPKcib.exit33 leaq 16(%rsp), %rdi movq %r12, %rsi callq hipMalloc testl %eax, %eax jne .LBB2_10 # %bb.11: # %_Z9gpuAssert10hipError_tPKcib.exit35 movq %r15, 160(%rsp) # 8-byte Spill movl $1, 8(%rsp) leaq 24(%rsp), %rdi movl $4, %esi callq hipMalloc testl %eax, %eax jne .LBB2_12 # %bb.13: # %_Z9gpuAssert10hipError_tPKcib.exit37 movq %r12, 152(%rsp) # 8-byte Spill movq 24(%rsp), %rdi leaq 8(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_40 # %bb.14: # %_Z9gpuAssert10hipError_tPKcib.exit39.preheader testl %r14d, %r14d jle .LBB2_31 # %bb.15: # %.lr.ph movabsq $4294967360, %r12 # imm = 0x100000040 addl $63, %ebx shrl 
$6, %ebx leaq (%rbx,%r12), %r13 addq $-64, %r13 leaq 8(%rsp), %r15 .p2align 4, 0x90 .LBB2_16: # =>This Inner Loop Header: Depth=1 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_18 # %bb.17: # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 32(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl %ebp, 12(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z4spinPiPdS_i, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_18: # in Loop: Header=BB2_16 Depth=1 callq hipPeekAtLastError testl %eax, %eax jne .LBB2_41 # %bb.19: # %_Z9gpuAssert10hipError_tPKcib.exit41 # in Loop: Header=BB2_16 Depth=1 callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_20 # %bb.21: # %_Z9gpuAssert10hipError_tPKcib.exit43 # in Loop: Header=BB2_16 Depth=1 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_23 # %bb.22: # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 16(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %ebp, 12(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 88(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 
56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z5checkPiS_iS_, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_23: # in Loop: Header=BB2_16 Depth=1 callq hipPeekAtLastError testl %eax, %eax jne .LBB2_24 # %bb.25: # %_Z9gpuAssert10hipError_tPKcib.exit51 # in Loop: Header=BB2_16 Depth=1 callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_26 # %bb.27: # %_Z9gpuAssert10hipError_tPKcib.exit53 # in Loop: Header=BB2_16 Depth=1 movq 24(%rsp), %rsi movl $4, %edx movq %r15, %rdi movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_28 # %bb.29: # %_Z9gpuAssert10hipError_tPKcib.exit55 # in Loop: Header=BB2_16 Depth=1 cmpl $0, 8(%rsp) jne .LBB2_31 # %bb.30: # %_Z9gpuAssert10hipError_tPKcib.exit39 # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 16(%rsp), %rcx movq %rcx, (%rsp) movq %rax, 16(%rsp) decl %r14d jne .LBB2_16 .LBB2_31: # %_Z9gpuAssert10hipError_tPKcib.exit55._crit_edge movq (%rsp), %rsi movq 160(%rsp), %rdi # 8-byte Reload movq 152(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_32 # %bb.33: # %_Z9gpuAssert10hipError_tPKcib.exit57 movq 32(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_34 # %bb.35: # %_Z9gpuAssert10hipError_tPKcib.exit59 movq (%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_36 # %bb.37: # %_Z9gpuAssert10hipError_tPKcib.exit61 movq 16(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_38 # %bb.39: # %_Z9gpuAssert10hipError_tPKcib.exit63 addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_41: .cfi_def_cfa_offset 224 movl %eax, %ebx movq stderr(%rip), %r14 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %r14, %rdi movq %rax, %rdx movl $126, %r8d 
xorl %eax, %eax callq fprintf movl %ebx, %edi callq exit .LBB2_24: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $130, %r8d jmp .LBB2_2 .LBB2_28: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $132, %r8d jmp .LBB2_2 .LBB2_20: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $127, %r8d jmp .LBB2_2 .LBB2_26: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $131, %r8d .LBB2_2: xorl %eax, %eax callq fprintf movl %ebp, %edi callq exit .LBB2_1: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $100, %r8d jmp .LBB2_2 .LBB2_4: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $101, %r8d jmp .LBB2_2 .LBB2_6: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $106, %r8d jmp .LBB2_2 .LBB2_8: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $107, %r8d jmp .LBB2_2 .LBB2_10: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $111, %r8d jmp .LBB2_2 .LBB2_12: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $116, %r8d jmp 
.LBB2_2 .LBB2_40: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $117, %r8d jmp .LBB2_2 .LBB2_32: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $143, %r8d jmp .LBB2_2 .LBB2_34: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $146, %r8d jmp .LBB2_2 .LBB2_36: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $147, %r8d jmp .LBB2_2 .LBB2_38: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $148, %r8d jmp .LBB2_2 .Lfunc_end2: .size _Z5isingPiPdii, .Lfunc_end2-_Z5isingPiPdii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $200, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rsp, %rdi movl $.L__const.main.w, %esi movl $200, %edx callq memcpy@PLT movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %rbx xorl %r14d, %r14d xorl %edi, %edi callq time movl %eax, %edi callq srand .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 callq rand # kill: def $eax killed $eax def $rax movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx andl $2147483646, %ecx # imm = 0x7FFFFFFE subl %ecx, %eax leal -1(,%rax,2), %eax movl %eax, (%rbx,%r14,4) incq %r14 cmpq $262144, %r14 # imm = 0x40000 jne .LBB3_1 # %bb.2: movq %rsp, %rsi movq %rbx, %rdi movl $10, %edx movl $512, %ecx # imm = 0x200 callq _Z5isingPiPdii xorl %eax, %eax addq $200, %rsp 
.cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4spinPiPdS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5checkPiS_iS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z4spinPiPdS_i,@object # @_Z4spinPiPdS_i .section .rodata,"a",@progbits .globl _Z4spinPiPdS_i .p2align 3, 0x0 _Z4spinPiPdS_i: .quad _Z19__device_stub__spinPiPdS_i .size _Z4spinPiPdS_i, 8 .type _Z5checkPiS_iS_,@object # @_Z5checkPiS_iS_ .globl 
_Z5checkPiS_iS_ .p2align 3, 0x0 _Z5checkPiS_iS_: .quad _Z20__device_stub__checkPiS_iS_ .size _Z5checkPiS_iS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/LambisElef/ece-auth-ising-cuda/master/ising_cuda_v1.hip" .size .L.str, 113 .type .L__const.main.w,@object # @__const.main.w .section .rodata,"a",@progbits .p2align 4, 0x0 .L__const.main.w: .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x0000000000000000 # double 0 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .size .L__const.main.w, 200 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "GPUassert: %s %s %d\n" .size .L.str.1, 21 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4spinPiPdS_i" .size .L__unnamed_1, 15 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz 
"_Z5checkPiS_iS_" .size .L__unnamed_2, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__spinPiPdS_i .addrsig_sym _Z20__device_stub__checkPiS_iS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4spinPiPdS_i .addrsig_sym _Z5checkPiS_iS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000fa854_00000000-6_ising_cuda_v1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z28__device_stub__Z4spinPiPdS_iPiPdS_i .type _Z28__device_stub__Z4spinPiPdS_iPiPdS_i, @function _Z28__device_stub__Z4spinPiPdS_iPiPdS_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4spinPiPdS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z28__device_stub__Z4spinPiPdS_iPiPdS_i, .-_Z28__device_stub__Z4spinPiPdS_iPiPdS_i .globl _Z4spinPiPdS_i .type _Z4spinPiPdS_i, @function _Z4spinPiPdS_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4spinPiPdS_iPiPdS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2085: .size _Z4spinPiPdS_i, .-_Z4spinPiPdS_i .globl _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ .type _Z29__device_stub__Z5checkPiS_iS_PiS_iS_, @function _Z29__device_stub__Z5checkPiS_iS_PiS_iS_: .LFB2086: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z5checkPiS_iS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z29__device_stub__Z5checkPiS_iS_PiS_iS_, .-_Z29__device_stub__Z5checkPiS_iS_PiS_iS_ .globl _Z5checkPiS_iS_ .type _Z5checkPiS_iS_, @function _Z5checkPiS_iS_: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z5checkPiS_iS_, .-_Z5checkPiS_iS_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "/home/ubuntu/Datasets/stackv2/train-structured/LambisElef/ece-auth-ising-cuda/master/ising_cuda_v1.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "GPUassert: %s %s %d\n" .text .globl _Z5isingPiPdii .type _Z5isingPiPdii, @function 
_Z5isingPiPdii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $104, %rsp .cfi_def_cfa_offset 160 movq %rdi, %rbp movq %rsi, %r14 movl %edx, %r12d movl %ecx, %ebx movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 32(%rsp), %rdi movl $200, %esi call cudaMalloc@PLT testl %eax, %eax jne .L44 movl $1, %ecx movl $200, %edx movq %r14, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r13d testl %eax, %eax jne .L45 movl %ebx, %r15d imull %ebx, %r15d leal 0(,%r15,4), %r13d movslq %r13d, %r13 leaq 40(%rsp), %rdi movq %r13, %rsi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L46 movl $1, %ecx movq %r13, %rdx movq %rbp, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r14d testl %eax, %eax jne .L47 leaq 48(%rsp), %rdi movq %r13, %rsi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L48 movl $1, 28(%rsp) leaq 56(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %r14d testl %eax, %eax jne .L49 leaq 28(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 56(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r14d testl %eax, %eax jne .L26 testl %r12d, %r12d jle .L28 leal 126(%r15), %r14d addl $63, %r15d cmovns %r15d, %r14d sarl $6, %r14d movl $0, 12(%rsp) .L36: movl $64, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl %r14d, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L50 .L29: call cudaPeekAtLastError@PLT movl %eax, %r15d testl %eax, %eax jne .L51 call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L52 movl $64, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) movl 
%r14d, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $0, %r9d movl $0, %r8d movq 76(%rsp), %rdx movl $1, %ecx movq 64(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L53 .L32: call cudaPeekAtLastError@PLT movl %eax, %r15d testl %eax, %eax jne .L54 call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L55 leaq 28(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 56(%rsp), %rsi call cudaMemcpy@PLT movl %eax, %r15d testl %eax, %eax jne .L56 cmpl $0, 28(%rsp) jne .L28 movq 40(%rsp), %rax movq 48(%rsp), %rdx movq %rdx, 40(%rsp) movq %rax, 48(%rsp) addl $1, 12(%rsp) movl 12(%rsp), %eax cmpl %eax, %r12d jne .L36 .L28: movl $2, %ecx movq %r13, %rdx movq 40(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L57 movq 32(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L58 movq 40(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L59 movq 48(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L60 movq 88(%rsp), %rax subq %fs:40, %rax jne .L61 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movl %eax, %r13d movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $98, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r13d, %edi call exit@PLT .L45: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $99, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r13d, %edi call exit@PLT .L46: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $104, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), 
%rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L47: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $105, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L48: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $109, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L49: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $114, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L26: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $115, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r14d, %edi call exit@PLT .L50: movl %ebx, %ecx movq 48(%rsp), %rdx movq 32(%rsp), %rsi movq 40(%rsp), %rdi call _Z28__device_stub__Z4spinPiPdS_iPiPdS_i jmp .L29 .L51: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $124, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L52: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $125, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L53: movq 56(%rsp), %rcx movl %ebx, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z29__device_stub__Z5checkPiS_iS_PiS_iS_ jmp .L32 .L54: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $128, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L55: movl %eax, %edi call 
cudaGetErrorString@PLT movq %rax, %rcx movl $129, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L56: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $130, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call exit@PLT .L57: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $141, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L58: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $144, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L59: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $145, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L60: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx movl $146, %r9d leaq .LC0(%rip), %r8 leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call exit@PLT .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z5isingPiPdii, .-_Z5isingPiPdii .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $208, %rsp .cfi_def_cfa_offset 240 movq %fs:40, %rax movq %rax, 200(%rsp) xorl %eax, %eax movsd .LC2(%rip), %xmm1 movsd %xmm1, (%rsp) movsd .LC3(%rip), %xmm0 movsd %xmm0, 8(%rsp) movsd .LC4(%rip), %xmm2 movsd %xmm2, 16(%rsp) movsd %xmm0, 24(%rsp) movsd %xmm1, 32(%rsp) movsd %xmm0, 40(%rsp) movsd .LC5(%rip), 
%xmm3 movsd %xmm3, 48(%rsp) movsd .LC6(%rip), %xmm4 movsd %xmm4, 56(%rsp) movsd %xmm3, 64(%rsp) movsd %xmm0, 72(%rsp) movsd %xmm2, 80(%rsp) movsd %xmm4, 88(%rsp) movq $0x000000000, 96(%rsp) movsd %xmm4, 104(%rsp) movsd %xmm2, 112(%rsp) movsd %xmm0, 120(%rsp) movsd %xmm3, 128(%rsp) movsd %xmm4, 136(%rsp) movsd %xmm3, 144(%rsp) movsd %xmm0, 152(%rsp) movsd %xmm1, 160(%rsp) movsd %xmm0, 168(%rsp) movsd %xmm2, 176(%rsp) movsd %xmm0, 184(%rsp) movsd %xmm1, 192(%rsp) movl $1048576, %edi call malloc@PLT movq %rax, %r12 movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movq %r12, %rbx leaq 1048576(%r12), %rbp .L63: call rand@PLT movl %eax, %edx shrl $31, %edx addl %edx, %eax andl $1, %eax subl %edx, %eax leal -1(%rax,%rax), %eax movl %eax, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L63 movq %rsp, %rsi movl $512, %ecx movl $10, %edx movq %r12, %rdi call _Z5isingPiPdii movq 200(%rsp), %rax subq %fs:40, %rax jne .L67 movl $0, %eax addq $208, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L67: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z5checkPiS_iS_" .LC9: .string "_Z4spinPiPdS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2089: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z5checkPiS_iS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z4spinPiPdS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long -755914244 .long 1064329805 .align 8 .LC3: .long -755914244 .long 1066426957 .align 8 .LC4: .long 1992864825 .long 1067098046 .align 8 .LC5: .long 1443109011 .long 1068641550 .align 8 .LC6: .long 1168231105 .long 1069413302 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ising_cuda_v1.hip" .globl _Z19__device_stub__spinPiPdS_i # -- Begin function _Z19__device_stub__spinPiPdS_i .p2align 4, 0x90 .type _Z19__device_stub__spinPiPdS_i,@function _Z19__device_stub__spinPiPdS_i: # @_Z19__device_stub__spinPiPdS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4spinPiPdS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z19__device_stub__spinPiPdS_i, .Lfunc_end0-_Z19__device_stub__spinPiPdS_i .cfi_endproc # -- End function .globl _Z20__device_stub__checkPiS_iS_ # -- Begin function _Z20__device_stub__checkPiS_iS_ .p2align 4, 0x90 .type _Z20__device_stub__checkPiS_iS_,@function _Z20__device_stub__checkPiS_iS_: # @_Z20__device_stub__checkPiS_iS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z5checkPiS_iS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size 
_Z20__device_stub__checkPiS_iS_, .Lfunc_end1-_Z20__device_stub__checkPiS_iS_ .cfi_endproc # -- End function .globl _Z5isingPiPdii # -- Begin function _Z5isingPiPdii .p2align 4, 0x90 .type _Z5isingPiPdii,@function _Z5isingPiPdii: # @_Z5isingPiPdii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movl %edx, %r14d movq %rsi, %rbx movq %rdi, %r15 leaq 32(%rsp), %rdi movl $200, %esi callq hipMalloc testl %eax, %eax jne .LBB2_1 # %bb.3: # %_Z9gpuAssert10hipError_tPKcib.exit movq 32(%rsp), %rdi movl $200, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_4 # %bb.5: # %_Z9gpuAssert10hipError_tPKcib.exit29 movl %ebp, %ebx imull %ebx, %ebx leal (,%rbx,4), %eax movslq %eax, %r12 movq %rsp, %rdi movq %r12, %rsi callq hipMalloc testl %eax, %eax jne .LBB2_6 # %bb.7: # %_Z9gpuAssert10hipError_tPKcib.exit31 movq (%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_8 # %bb.9: # %_Z9gpuAssert10hipError_tPKcib.exit33 leaq 16(%rsp), %rdi movq %r12, %rsi callq hipMalloc testl %eax, %eax jne .LBB2_10 # %bb.11: # %_Z9gpuAssert10hipError_tPKcib.exit35 movq %r15, 160(%rsp) # 8-byte Spill movl $1, 8(%rsp) leaq 24(%rsp), %rdi movl $4, %esi callq hipMalloc testl %eax, %eax jne .LBB2_12 # %bb.13: # %_Z9gpuAssert10hipError_tPKcib.exit37 movq %r12, 152(%rsp) # 8-byte Spill movq 24(%rsp), %rdi leaq 8(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_40 # %bb.14: # %_Z9gpuAssert10hipError_tPKcib.exit39.preheader testl %r14d, %r14d jle .LBB2_31 # %bb.15: # %.lr.ph movabsq $4294967360, %r12 # imm = 0x100000040 addl $63, %ebx shrl 
$6, %ebx leaq (%rbx,%r12), %r13 addq $-64, %r13 leaq 8(%rsp), %r15 .p2align 4, 0x90 .LBB2_16: # =>This Inner Loop Header: Depth=1 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_18 # %bb.17: # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 32(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl %ebp, 12(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z4spinPiPdS_i, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_18: # in Loop: Header=BB2_16 Depth=1 callq hipPeekAtLastError testl %eax, %eax jne .LBB2_41 # %bb.19: # %_Z9gpuAssert10hipError_tPKcib.exit41 # in Loop: Header=BB2_16 Depth=1 callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_20 # %bb.21: # %_Z9gpuAssert10hipError_tPKcib.exit43 # in Loop: Header=BB2_16 Depth=1 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_23 # %bb.22: # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 16(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %ebp, 12(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 88(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 
56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z5checkPiS_iS_, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_23: # in Loop: Header=BB2_16 Depth=1 callq hipPeekAtLastError testl %eax, %eax jne .LBB2_24 # %bb.25: # %_Z9gpuAssert10hipError_tPKcib.exit51 # in Loop: Header=BB2_16 Depth=1 callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_26 # %bb.27: # %_Z9gpuAssert10hipError_tPKcib.exit53 # in Loop: Header=BB2_16 Depth=1 movq 24(%rsp), %rsi movl $4, %edx movq %r15, %rdi movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_28 # %bb.29: # %_Z9gpuAssert10hipError_tPKcib.exit55 # in Loop: Header=BB2_16 Depth=1 cmpl $0, 8(%rsp) jne .LBB2_31 # %bb.30: # %_Z9gpuAssert10hipError_tPKcib.exit39 # in Loop: Header=BB2_16 Depth=1 movq (%rsp), %rax movq 16(%rsp), %rcx movq %rcx, (%rsp) movq %rax, 16(%rsp) decl %r14d jne .LBB2_16 .LBB2_31: # %_Z9gpuAssert10hipError_tPKcib.exit55._crit_edge movq (%rsp), %rsi movq 160(%rsp), %rdi # 8-byte Reload movq 152(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_32 # %bb.33: # %_Z9gpuAssert10hipError_tPKcib.exit57 movq 32(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_34 # %bb.35: # %_Z9gpuAssert10hipError_tPKcib.exit59 movq (%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_36 # %bb.37: # %_Z9gpuAssert10hipError_tPKcib.exit61 movq 16(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_38 # %bb.39: # %_Z9gpuAssert10hipError_tPKcib.exit63 addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_41: .cfi_def_cfa_offset 224 movl %eax, %ebx movq stderr(%rip), %r14 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %r14, %rdi movq %rax, %rdx movl $126, %r8d 
xorl %eax, %eax callq fprintf movl %ebx, %edi callq exit .LBB2_24: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $130, %r8d jmp .LBB2_2 .LBB2_28: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $132, %r8d jmp .LBB2_2 .LBB2_20: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $127, %r8d jmp .LBB2_2 .LBB2_26: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $131, %r8d .LBB2_2: xorl %eax, %eax callq fprintf movl %ebp, %edi callq exit .LBB2_1: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $100, %r8d jmp .LBB2_2 .LBB2_4: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $101, %r8d jmp .LBB2_2 .LBB2_6: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $106, %r8d jmp .LBB2_2 .LBB2_8: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $107, %r8d jmp .LBB2_2 .LBB2_10: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $111, %r8d jmp .LBB2_2 .LBB2_12: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $116, %r8d jmp 
.LBB2_2 .LBB2_40: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $117, %r8d jmp .LBB2_2 .LBB2_32: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $143, %r8d jmp .LBB2_2 .LBB2_34: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $146, %r8d jmp .LBB2_2 .LBB2_36: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $147, %r8d jmp .LBB2_2 .LBB2_38: movq stderr(%rip), %rbx movl %eax, %edi movl %eax, %ebp callq hipGetErrorString movl $.L.str.1, %esi movl $.L.str, %ecx movq %rbx, %rdi movq %rax, %rdx movl $148, %r8d jmp .LBB2_2 .Lfunc_end2: .size _Z5isingPiPdii, .Lfunc_end2-_Z5isingPiPdii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $200, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rsp, %rdi movl $.L__const.main.w, %esi movl $200, %edx callq memcpy@PLT movl $1048576, %edi # imm = 0x100000 callq malloc movq %rax, %rbx xorl %r14d, %r14d xorl %edi, %edi callq time movl %eax, %edi callq srand .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 callq rand # kill: def $eax killed $eax def $rax movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx andl $2147483646, %ecx # imm = 0x7FFFFFFE subl %ecx, %eax leal -1(,%rax,2), %eax movl %eax, (%rbx,%r14,4) incq %r14 cmpq $262144, %r14 # imm = 0x40000 jne .LBB3_1 # %bb.2: movq %rsp, %rsi movq %rbx, %rdi movl $10, %edx movl $512, %ecx # imm = 0x200 callq _Z5isingPiPdii xorl %eax, %eax addq $200, %rsp 
.cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4spinPiPdS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5checkPiS_iS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z4spinPiPdS_i,@object # @_Z4spinPiPdS_i .section .rodata,"a",@progbits .globl _Z4spinPiPdS_i .p2align 3, 0x0 _Z4spinPiPdS_i: .quad _Z19__device_stub__spinPiPdS_i .size _Z4spinPiPdS_i, 8 .type _Z5checkPiS_iS_,@object # @_Z5checkPiS_iS_ .globl 
_Z5checkPiS_iS_ .p2align 3, 0x0 _Z5checkPiS_iS_: .quad _Z20__device_stub__checkPiS_iS_ .size _Z5checkPiS_iS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/LambisElef/ece-auth-ising-cuda/master/ising_cuda_v1.hip" .size .L.str, 113 .type .L__const.main.w,@object # @__const.main.w .section .rodata,"a",@progbits .p2align 4, 0x0 .L__const.main.w: .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x0000000000000000 # double 0 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .size .L__const.main.w, 200 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "GPUassert: %s %s %d\n" .size .L.str.1, 21 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4spinPiPdS_i" .size .L__unnamed_1, 15 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz 
"_Z5checkPiS_iS_" .size .L__unnamed_2, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__spinPiPdS_i .addrsig_sym _Z20__device_stub__checkPiS_iS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4spinPiPdS_i .addrsig_sym _Z5checkPiS_iS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>

// Abort with a diagnostic if a CUDA API call failed.
// The argument is evaluated exactly once: the previous version expanded X
// three times (condition, %d argument, cudaGetErrorString argument), so a
// failing call such as cudaMalloc was re-executed while building the message.
#define CUDA_CHECK_ERROR(X)({\
    cudaError_t cuda_err_ = (X);\
    if(cuda_err_ != cudaSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", cuda_err_, __FILE__, __LINE__, cudaGetErrorString(cuda_err_));\
        exit(1);\
    }\
})

// Abort if a host allocation (or a conjunction of allocations) is null.
// Single evaluation of the argument, mirroring CUDA_CHECK_ERROR.
#define MALLOC_CHECK_ERROR(X)({\
    int malloc_ok_ = (X);\
    if (malloc_ok_ == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, malloc_ok_);\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps (absolute tolerance; adequate here because
// host and device perform the identical single-precision addition).
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    if (a > b)
        return a - b <= eps;
    else
        return b - a <= eps;
}

// Initialise the vector v of n elements to pseudo-random values
// (rand() % 100 scaled by 0.3234f).
void init_vec(float *v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 100 * 0.3234f;
    }
}

// kernel to perform vector addition: c[i] = a[i] + b[i] for i in [0, n).
// 1D grid of 1D blocks; the bounds guard makes any launch with
// gridDim.x * blockDim.x >= n correct. Cast makes the comparison unsigned
// on both sides (n is non-negative at every call site).
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < (unsigned int)n)
        c[i] = a[i] + b[i];
}

int main(void){
    int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);

    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(cudaMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(cudaMemcpy(dev_A, A, sizeof(float) * n, cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMemcpy(dev_B, B, sizeof(float) * n, cudaMemcpyHostToDevice));

    // Ceil-divide so the last (partial) block is still launched.
    int nThreads = 1024;
    int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((cudaError_t)cudaGetLastError());   // launch-configuration errors
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());           // asynchronous execution errors

    CUDA_CHECK_ERROR(cudaMemcpy(C, dev_C, sizeof(float) * n, cudaMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());

    // check the result is correct against a host-side reference sum
    for(int i = 0; i < n; i++){
        bool sums_equal = compare_float(C[i], A[i] + B[i]);
        if(!sums_equal){
            fprintf(stderr, "Sum is not correct.\n");
            cudaFree(dev_A);
            cudaFree(dev_B);
            cudaFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(cudaFree(dev_A));
    CUDA_CHECK_ERROR(cudaFree(dev_B));
    CUDA_CHECK_ERROR(cudaFree(dev_C));
    free(A);
    free(B);
    free(C);
    printf("All good.\n");
    return 0;
}
code for sm_80 Function : _Z10vector_addPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0007 */ /*0090*/ IMAD.WIDE.U32 R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0007 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0007 */ /*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h>
#include <stdlib.h>

// Check the result of a CUDA runtime call and abort on failure.
// The checked expression is evaluated exactly ONCE: the previous version
// expanded X three times, so the failing API call was re-issued twice
// just to build the error message (and could report a different error).
#define CUDA_CHECK_ERROR(X)({\
    cudaError_t cuda_err_ = (cudaError_t)(X);\
    if(cuda_err_ != cudaSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", cuda_err_, __FILE__, __LINE__, cudaGetErrorString(cuda_err_));\
        exit(1);\
    }\
})

// Abort if a host allocation check failed.  X (typically a conjunction of
// pointers) is evaluated once; its zero value is reported on failure.
#define MALLOC_CHECK_ERROR(X)({\
    int malloc_ok_ = (X);\
    if (malloc_ok_ == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, malloc_ok_);\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    if (a > b)
        return a - b <= eps;
    else
        return b - a <= eps;
}

// Initialise the vector v of n elements to random values
void init_vec(float *v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 100 * 0.3234f;
    }
}

// kernel to perform vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; the bounds check guards the partial last block.
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main(void){
    int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);

    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(cudaMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(cudaMemcpy(dev_A, A, sizeof(float) * n, cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMemcpy(dev_B, B, sizeof(float) * n, cudaMemcpyHostToDevice));

    // Ceiling division so every element is covered by a thread.
    int nThreads = 1024;
    int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((cudaError_t)cudaGetLastError());  // launch-configuration errors
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());          // asynchronous execution errors

    CUDA_CHECK_ERROR(cudaMemcpy(C, dev_C, sizeof(float) * n, cudaMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());

    // check the result is correct
    for(int i = 0; i < n; i++){
        bool sums_equal = compare_float(C[i], A[i] + B[i]);
        if(!sums_equal){
            fprintf(stderr, "Sum is not correct.\n");
            cudaFree(dev_A);
            cudaFree(dev_B);
            cudaFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(cudaFree(dev_A));
    CUDA_CHECK_ERROR(cudaFree(dev_B));
    CUDA_CHECK_ERROR(cudaFree(dev_C));
    free(A);
    free(B);
    free(C);

    printf("All good.\n");
    return 0;
}
.file "tmpxft_0003d824_00000000-6_gpu_vector_add.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8init_vecPfi .type _Z8init_vecPfi, @function _Z8init_vecPfi: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %rbp .L5: call rand@PLT movslq %eax, %rdx imulq $1374389535, %rdx, %rdx sarq $37, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $100, %edx, %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2058: .size _Z8init_vecPfi, .-_Z8init_vecPfi .globl _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i .type _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i, @function _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi 
call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10vector_addPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i .globl _Z10vector_addPfS_S_i .type _Z10vector_addPfS_S_i, @function _Z10vector_addPfS_S_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z10vector_addPfS_S_i, .-_Z10vector_addPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/PawseySC/Introductory-Supercomputing/master/vector_add_gpu/gpu_vector_add.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "Malloc error (%s:%d): %i\n" .LC3: .string "CUDA error %d (%s:%d): %s\n" .LC5: .string "Sum is not correct.\n" .LC6: .string "All good.\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $8000, %edi call malloc@PLT movq %rax, %rbp movl $8000, %edi call malloc@PLT movq %rax, %rbx movl $8000, %edi call malloc@PLT movq %rax, %r12 testq %rbp, %rbp setne %dl testq %rbx, %rbx setne %al testb %al, %dl je .L20 testq %r12, %r12 je .L20 movl $2000, %esi movq %rbp, %rdi call _Z8init_vecPfi 
movl $2000, %esi movq %rbx, %rdi call _Z8init_vecPfi leaq 8(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L47 leaq 16(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L48 leaq 24(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L49 movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L50 movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L51 movl $1024, 44(%rsp) movl $1, 48(%rsp) movl $2, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L52 .L27: call cudaGetLastError@PLT testl %eax, %eax jne .L53 call cudaDeviceSynchronize@PLT testl %eax, %eax jne .L54 movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L55 call cudaDeviceSynchronize@PLT testl %eax, %eax jne .L56 movl $0, %eax movss .LC4(%rip), %xmm3 movaps %xmm3, %xmm2 jmp .L31 .L20: movl $0, %r9d movl $54, %r8d leaq .LC1(%rip), %rcx leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L47: leaq 8(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $58, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L48: .cfi_restore_state leaq 16(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $59, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L49: .cfi_restore_state leaq 24(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $60, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L50: .cfi_restore_state movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $61, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L51: .cfi_restore_state movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbp movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbp .cfi_def_cfa_offset 112 movl $62, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L52: .cfi_restore_state movl $2000, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i jmp .L27 .L53: call cudaGetLastError@PLT movl %eax, %edi call 
cudaGetErrorString@PLT movq %rax, %rbx call cudaGetLastError@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $66, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L54: .cfi_restore_state call cudaDeviceSynchronize@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx call cudaDeviceSynchronize@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $67, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L55: .cfi_restore_state movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $68, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L56: .cfi_restore_state call cudaDeviceSynchronize@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx call cudaDeviceSynchronize@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $69, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L45: .cfi_restore_state subss %xmm1, %xmm0 comiss %xmm0, %xmm3 jb .L35 .L34: addq $4, %rax cmpq $8000, %rax je .L57 .L31: movss 0(%rbp,%rax), %xmm0 addss (%rbx,%rax), %xmm0 movss (%r12,%rax), %xmm1 comiss %xmm0, %xmm1 jbe .L45 subss %xmm0, %xmm1 comiss %xmm1, %xmm2 jnb .L34 .L35: leaq .LC5(%rip), 
%rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movl $1, %eax .L19: movq 56(%rsp), %rdx subq %fs:40, %rdx jne .L58 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state movq 8(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L59 movq 16(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L60 movq 24(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L61 movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %eax jmp .L19 .L59: movq 8(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 8(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $85, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: .cfi_restore_state movq 16(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 16(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $86, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L61: .cfi_restore_state movq 24(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 24(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx 
.cfi_def_cfa_offset 112 movl $87, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L58: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z10vector_addPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z10vector_addPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1051038895 .align 4 .LC4: .long 869711765 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h>
#include <stdlib.h>

// Check the result of a CUDA runtime call and abort on failure.
// The checked expression is evaluated exactly ONCE: the previous version
// expanded X three times, so the failing API call was re-issued twice
// just to build the error message (and could report a different error).
#define CUDA_CHECK_ERROR(X)({\
    cudaError_t cuda_err_ = (cudaError_t)(X);\
    if(cuda_err_ != cudaSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", cuda_err_, __FILE__, __LINE__, cudaGetErrorString(cuda_err_));\
        exit(1);\
    }\
})

// Abort if a host allocation check failed.  X (typically a conjunction of
// pointers) is evaluated once; its zero value is reported on failure.
#define MALLOC_CHECK_ERROR(X)({\
    int malloc_ok_ = (X);\
    if (malloc_ok_ == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, malloc_ok_);\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    if (a > b)
        return a - b <= eps;
    else
        return b - a <= eps;
}

// Initialise the vector v of n elements to random values
void init_vec(float *v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 100 * 0.3234f;
    }
}

// kernel to perform vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; the bounds check guards the partial last block.
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main(void){
    int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);

    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(cudaMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(cudaMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(cudaMemcpy(dev_A, A, sizeof(float) * n, cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMemcpy(dev_B, B, sizeof(float) * n, cudaMemcpyHostToDevice));

    // Ceiling division so every element is covered by a thread.
    int nThreads = 1024;
    int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((cudaError_t)cudaGetLastError());  // launch-configuration errors
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());          // asynchronous execution errors

    CUDA_CHECK_ERROR(cudaMemcpy(C, dev_C, sizeof(float) * n, cudaMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());

    // check the result is correct
    for(int i = 0; i < n; i++){
        bool sums_equal = compare_float(C[i], A[i] + B[i]);
        if(!sums_equal){
            fprintf(stderr, "Sum is not correct.\n");
            cudaFree(dev_A);
            cudaFree(dev_B);
            cudaFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(cudaFree(dev_A));
    CUDA_CHECK_ERROR(cudaFree(dev_B));
    CUDA_CHECK_ERROR(cudaFree(dev_C));
    free(A);
    free(B);
    free(C);

    printf("All good.\n");
    return 0;
}
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Check the result of a HIP runtime call and abort on failure.
// The checked expression is evaluated exactly ONCE: the previous version
// expanded X three times, so the failing API call was re-issued twice
// just to build the error message (and could report a different error).
#define CUDA_CHECK_ERROR(X)({\
    hipError_t hip_err_ = (hipError_t)(X);\
    if(hip_err_ != hipSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", hip_err_, __FILE__, __LINE__, hipGetErrorString(hip_err_));\
        exit(1);\
    }\
})

// Abort if a host allocation check failed.  X (typically a conjunction of
// pointers) is evaluated once; its zero value is reported on failure.
#define MALLOC_CHECK_ERROR(X)({\
    int malloc_ok_ = (X);\
    if (malloc_ok_ == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, malloc_ok_);\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    if (a > b)
        return a - b <= eps;
    else
        return b - a <= eps;
}

// Initialise the vector v of n elements to random values
void init_vec(float *v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 100 * 0.3234f;
    }
}

// kernel to perform vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; the bounds check guards the partial last block.
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main(void){
    int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);

    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(hipMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(hipMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(hipMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(hipMemcpy(dev_A, A, sizeof(float) * n, hipMemcpyHostToDevice));
    CUDA_CHECK_ERROR(hipMemcpy(dev_B, B, sizeof(float) * n, hipMemcpyHostToDevice));

    // Ceiling division so every element is covered by a thread.
    int nThreads = 1024;
    int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((hipError_t)hipGetLastError());  // launch-configuration errors
    CUDA_CHECK_ERROR(hipDeviceSynchronize());         // asynchronous execution errors

    CUDA_CHECK_ERROR(hipMemcpy(C, dev_C, sizeof(float) * n, hipMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(hipDeviceSynchronize());

    // check the result is correct
    for(int i = 0; i < n; i++){
        bool sums_equal = compare_float(C[i], A[i] + B[i]);
        if(!sums_equal){
            fprintf(stderr, "Sum is not correct.\n");
            hipFree(dev_A);
            hipFree(dev_B);
            hipFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(hipFree(dev_A));
    CUDA_CHECK_ERROR(hipFree(dev_B));
    CUDA_CHECK_ERROR(hipFree(dev_C));
    free(A);
    free(B);
    free(C);

    printf("All good.\n");
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Check the result of a HIP runtime call and abort on failure.
// The checked expression is evaluated exactly ONCE: the previous version
// expanded X three times, so the failing API call was re-issued twice
// just to build the error message (and could report a different error).
#define CUDA_CHECK_ERROR(X)({\
    hipError_t hip_err_ = (hipError_t)(X);\
    if(hip_err_ != hipSuccess){\
        fprintf(stderr, "CUDA error %d (%s:%d): %s\n", hip_err_, __FILE__, __LINE__, hipGetErrorString(hip_err_));\
        exit(1);\
    }\
})

// Abort if a host allocation check failed.  X (typically a conjunction of
// pointers) is evaluated once; its zero value is reported on failure.
#define MALLOC_CHECK_ERROR(X)({\
    int malloc_ok_ = (X);\
    if (malloc_ok_ == 0){\
        fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, malloc_ok_);\
        exit(1);\
    }\
})

// Returns True if |a - b| <= eps
inline bool compare_float(float a, float b){
    const float eps = 1e-7f;
    if (a > b)
        return a - b <= eps;
    else
        return b - a <= eps;
}

// Initialise the vector v of n elements to random values
void init_vec(float *v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 100 * 0.3234f;
    }
}

// kernel to perform vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; the bounds check guards the partial last block.
__global__ void vector_add(float *a, float *b, float *c, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main(void){
    int n = 2000;

    // Host buffers.
    float *A = (float*) malloc(n * sizeof(float));
    float *B = (float*) malloc(n * sizeof(float));
    float *C = (float*) malloc(n * sizeof(float));
    MALLOC_CHECK_ERROR(A && B && C);

    init_vec(A, n);
    init_vec(B, n);

    // Device buffers.
    float *dev_A, *dev_B, *dev_C;
    CUDA_CHECK_ERROR(hipMalloc(&dev_A, sizeof(float) * n));
    CUDA_CHECK_ERROR(hipMalloc(&dev_B, sizeof(float) * n));
    CUDA_CHECK_ERROR(hipMalloc(&dev_C, sizeof(float) * n));

    CUDA_CHECK_ERROR(hipMemcpy(dev_A, A, sizeof(float) * n, hipMemcpyHostToDevice));
    CUDA_CHECK_ERROR(hipMemcpy(dev_B, B, sizeof(float) * n, hipMemcpyHostToDevice));

    // Ceiling division so every element is covered by a thread.
    int nThreads = 1024;
    int nBlocks = (n + nThreads - 1) / nThreads;
    vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n);
    CUDA_CHECK_ERROR((hipError_t)hipGetLastError());  // launch-configuration errors
    CUDA_CHECK_ERROR(hipDeviceSynchronize());         // asynchronous execution errors

    CUDA_CHECK_ERROR(hipMemcpy(C, dev_C, sizeof(float) * n, hipMemcpyDeviceToHost));
    CUDA_CHECK_ERROR(hipDeviceSynchronize());

    // check the result is correct
    for(int i = 0; i < n; i++){
        bool sums_equal = compare_float(C[i], A[i] + B[i]);
        if(!sums_equal){
            fprintf(stderr, "Sum is not correct.\n");
            hipFree(dev_A);
            hipFree(dev_B);
            hipFree(dev_C);
            free(A);
            free(B);
            free(C);
            return 1;
        }
    }

    CUDA_CHECK_ERROR(hipFree(dev_A));
    CUDA_CHECK_ERROR(hipFree(dev_B));
    CUDA_CHECK_ERROR(hipFree(dev_C));
    free(A);
    free(B);
    free(C);

    printf("All good.\n");
    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10vector_addPfS_S_i .globl _Z10vector_addPfS_S_i .p2align 8 .type _Z10vector_addPfS_S_i,@function _Z10vector_addPfS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mov_b32_e32 v2, 0 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10vector_addPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10vector_addPfS_S_i, .Lfunc_end0-_Z10vector_addPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 
.size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10vector_addPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10vector_addPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define CUDA_CHECK_ERROR(X)({\ if((X) != hipSuccess){\ fprintf(stderr, "CUDA error %d (%s:%d): %s\n", (X), __FILE__, __LINE__, hipGetErrorString((hipError_t)(X)));\ exit(1);\ }\ }) #define MALLOC_CHECK_ERROR(X)({\ if ((X) == 0){\ fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, (X));\ exit(1);\ }\ }) // Returns True if |a - b| <= eps inline bool compare_float(float a, float b){ const float eps = 1e-7f; if (a > b) return a - b <= eps; else return b - a <= eps; } // Initialise the vector v of n elements to random values void init_vec(float *v, int n){ for(int i = 0; i < n; i++){ v[i] = rand() % 100 * 0.3234f; } } // kernel to perform vector addition __global__ void vector_add(float *a, float *b, float *c, int n){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) c[i] = a[i] + b[i]; } int main(void){ int n = 2000; float *A = (float*) malloc(n * sizeof(float)); float *B = (float*) malloc(n * sizeof(float)); float *C = (float*) malloc(n * sizeof(float)); MALLOC_CHECK_ERROR(A && B && C); init_vec(A, n); init_vec(B, n); float *dev_A, *dev_B, *dev_C; CUDA_CHECK_ERROR(hipMalloc(&dev_A, sizeof(float) * n)); CUDA_CHECK_ERROR(hipMalloc(&dev_B, sizeof(float) * n)); CUDA_CHECK_ERROR(hipMalloc(&dev_C, sizeof(float) * n)); CUDA_CHECK_ERROR(hipMemcpy(dev_A, A, sizeof(float) * n, hipMemcpyHostToDevice)); CUDA_CHECK_ERROR(hipMemcpy(dev_B, B, sizeof(float) * n, hipMemcpyHostToDevice)); int nThreads = 1024; int nBlocks = (n + nThreads - 1) / nThreads; vector_add<<<nBlocks, nThreads>>>(dev_A, dev_B, dev_C, n); CUDA_CHECK_ERROR((hipError_t)hipGetLastError()); CUDA_CHECK_ERROR(hipDeviceSynchronize()); CUDA_CHECK_ERROR(hipMemcpy(C, dev_C, sizeof(float) * n, hipMemcpyDeviceToHost)); CUDA_CHECK_ERROR(hipDeviceSynchronize()); // check the result is correct for(int i = 0; i < n; i++){ bool sums_equal = compare_float(C[i], A[i] + B[i]); if(!sums_equal){ fprintf(stderr, "Sum is not correct.\n"); 
hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); free(A); free(B); free(C); return 1; } } CUDA_CHECK_ERROR(hipFree(dev_A)); CUDA_CHECK_ERROR(hipFree(dev_B)); CUDA_CHECK_ERROR(hipFree(dev_C)); free(A); free(B); free(C); printf("All good.\n"); return 0; }
.text .file "gpu_vector_add.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8init_vecPfi .LCPI0_0: .long 0x3ea594af # float 0.323399991 .text .globl _Z8init_vecPfi .p2align 4, 0x90 .type _Z8init_vecPfi,@function _Z8init_vecPfi: # @_Z8init_vecPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq $37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI0_0(%rip), %xmm0 movss %xmm0, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB0_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge retq .Lfunc_end0: .size _Z8init_vecPfi, .Lfunc_end0-_Z8init_vecPfi .cfi_endproc # -- End function .globl _Z25__device_stub__vector_addPfS_S_i # -- Begin function _Z25__device_stub__vector_addPfS_S_i .p2align 4, 0x90 .type _Z25__device_stub__vector_addPfS_S_i,@function _Z25__device_stub__vector_addPfS_S_i: # @_Z25__device_stub__vector_addPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 
movl $_Z10vector_addPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z25__device_stub__vector_addPfS_S_i, .Lfunc_end1-_Z25__device_stub__vector_addPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI2_0: .long 0x3ea594af # float 0.323399991 .LCPI2_1: .long 0x33d6bf95 # float 1.00000001E-7 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %rbx movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %r14 movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %r15 testq %rbx, %rbx setne %cl testq %r14, %r14 setne %al andb %cl, %al cmpb $1, %al jne .LBB2_41 # %bb.1: testq %r15, %r15 je .LBB2_41 # %bb.2: # %.lr.ph.i.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq $37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r12,4) incq %r12 cmpq $2000, %r12 # imm = 0x7D0 jne .LBB2_3 # %bb.4: # %.lr.ph.i85.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_5: # %.lr.ph.i85 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq 
$37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI2_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq $2000, %r12 # imm = 0x7D0 jne .LBB2_5 # %bb.6: # %_Z8init_vecPfi.exit89 leaq 24(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_7 # %bb.8: leaq 16(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_9 # %bb.10: leaq 8(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_11 # %bb.12: movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_13 # %bb.14: movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_15 # %bb.16: movabsq $4294967298, %rdi # imm = 0x100000002 leaq 1022(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_18 # %bb.17: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $2000, 36(%rsp) # imm = 0x7D0 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10vector_addPfS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_18: callq hipGetLastError testl %eax, %eax jne .LBB2_19 # %bb.21: callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_22 # %bb.23: movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy testl 
%eax, %eax jne .LBB2_24 # %bb.25: callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_27 # %bb.26: # %.preheader.preheader xorl %r12d, %r12d movss .LCPI2_1(%rip), %xmm3 # xmm3 = mem[0],zero,zero,zero xorl %r13d, %r13d xorl %eax, %eax .p2align 4, 0x90 .LBB2_29: # %.preheader # =>This Inner Loop Header: Depth=1 movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss (%rbx,%r12,4), %xmm4 # xmm4 = mem[0],zero,zero,zero addss (%r14,%r12,4), %xmm4 movaps %xmm0, %xmm1 subss %xmm4, %xmm1 movaps %xmm4, %xmm2 subss %xmm0, %xmm2 cmpltss %xmm0, %xmm4 andps %xmm4, %xmm1 andnps %xmm2, %xmm4 orps %xmm1, %xmm4 ucomiss %xmm4, %xmm3 jb .LBB2_30 # %bb.31: # in Loop: Header=BB2_29 Depth=1 ucomiss %xmm4, %xmm3 jb .LBB2_32 .LBB2_28: # in Loop: Header=BB2_29 Depth=1 cmpq $1999, %r12 # imm = 0x7CF leaq 1(%r12), %rcx setae %r13b movq %rcx, %r12 cmpq $2000, %rcx # imm = 0x7D0 jne .LBB2_29 jmp .LBB2_32 .LBB2_30: # in Loop: Header=BB2_29 Depth=1 movq stderr(%rip), %rcx movl $.L.str.3, %edi movl $20, %esi movl $1, %edx movaps %xmm4, 144(%rsp) # 16-byte Spill callq fwrite@PLT movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movaps 144(%rsp), %xmm4 # 16-byte Reload movss .LCPI2_1(%rip), %xmm3 # xmm3 = mem[0],zero,zero,zero movl $1, %eax ucomiss %xmm4, %xmm3 jae .LBB2_28 .LBB2_32: testb $1, %r13b je .LBB2_40 # %bb.33: movq 24(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_34 # %bb.35: movq 16(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_36 # %bb.37: movq 8(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_38 # %bb.39: movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movl $.Lstr, %edi callq puts@PLT xorl %eax, %eax .LBB2_40: addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 
.cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_41: .cfi_def_cfa_offset 224 testq %r15, %r15 setne %cl movq stderr(%rip), %rdi andb %cl, %al movzbl %al, %r8d movl $.L.str, %esi movl $.L.str.1, %edx movl $56, %ecx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .LBB2_7: movq stderr(%rip), %rbx leaq 24(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $60, %r8d jmp .LBB2_20 .LBB2_9: movq stderr(%rip), %rbx leaq 16(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $61, %r8d jmp .LBB2_20 .LBB2_11: movq stderr(%rip), %rbx leaq 8(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $62, %r8d jmp .LBB2_20 .LBB2_13: movq stderr(%rip), %r14 movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %ebp movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %r14, %rdi movl %ebp, %edx movl $63, %r8d jmp .LBB2_20 .LBB2_15: movq stderr(%rip), %rbx movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %ebp movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, 
%edx movl $64, %r8d jmp .LBB2_20 .LBB2_19: movq stderr(%rip), %rbx callq hipGetLastError movl %eax, %ebp callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $68, %r8d jmp .LBB2_20 .LBB2_22: movq stderr(%rip), %rbx callq hipDeviceSynchronize movl %eax, %ebp callq hipDeviceSynchronize movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $69, %r8d jmp .LBB2_20 .LBB2_24: movq stderr(%rip), %rbx movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy movl %eax, %ebp movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $70, %r8d jmp .LBB2_20 .LBB2_27: movq stderr(%rip), %rbx callq hipDeviceSynchronize movl %eax, %ebp callq hipDeviceSynchronize movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $71, %r8d jmp .LBB2_20 .LBB2_34: movq stderr(%rip), %rbx movq 24(%rsp), %rdi callq hipFree movl %eax, %ebp movq 24(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $87, %r8d jmp .LBB2_20 .LBB2_36: movq stderr(%rip), %rbx movq 16(%rsp), %rdi callq hipFree movl %eax, %ebp movq 16(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $88, %r8d jmp .LBB2_20 .LBB2_38: movq stderr(%rip), %rbx movq 8(%rsp), %rdi callq hipFree movl %eax, %ebp movq 8(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $89, %r8d .LBB2_20: movq %rax, %r9 xorl %eax, %eax callq fprintf movl $1, %edi callq exit 
.Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZL9hipMallocIfE10hipError_tPPT_m .type _ZL9hipMallocIfE10hipError_tPPT_m,@function _ZL9hipMallocIfE10hipError_tPPT_m: # @_ZL9hipMallocIfE10hipError_tPPT_m .cfi_startproc # %bb.0: movl $8000, %esi # imm = 0x1F40 jmp hipMalloc # TAILCALL .Lfunc_end3: .size _ZL9hipMallocIfE10hipError_tPPT_m, .Lfunc_end3-_ZL9hipMallocIfE10hipError_tPPT_m .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10vector_addPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z10vector_addPfS_S_i,@object # @_Z10vector_addPfS_S_i .section .rodata,"a",@progbits .globl _Z10vector_addPfS_S_i .p2align 3, 0x0 _Z10vector_addPfS_S_i: .quad _Z25__device_stub__vector_addPfS_S_i .size _Z10vector_addPfS_S_i, 8 .type .L.str,@object # 
@.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Malloc error (%s:%d): %i\n" .size .L.str, 26 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/PawseySC/Introductory-Supercomputing/master/vector_add_gpu/gpu_vector_add.hip" .size .L.str.1, 135 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "CUDA error %d (%s:%d): %s\n" .size .L.str.2, 27 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Sum is not correct.\n" .size .L.str.3, 21 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10vector_addPfS_S_i" .size .L__unnamed_1, 22 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "All good." .size .Lstr, 10 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__vector_addPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10vector_addPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10vector_addPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0007 */ /*0090*/ IMAD.WIDE.U32 R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0007 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0007 */ /*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10vector_addPfS_S_i .globl _Z10vector_addPfS_S_i .p2align 8 .type _Z10vector_addPfS_S_i,@function _Z10vector_addPfS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mov_b32_e32 v2, 0 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10vector_addPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10vector_addPfS_S_i, .Lfunc_end0-_Z10vector_addPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 
.size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10vector_addPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10vector_addPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0003d824_00000000-6_gpu_vector_add.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8init_vecPfi .type _Z8init_vecPfi, @function _Z8init_vecPfi: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %rbp .L5: call rand@PLT movslq %eax, %rdx imulq $1374389535, %rdx, %rdx sarq $37, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $100, %edx, %edx subl %edx, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2058: .size _Z8init_vecPfi, .-_Z8init_vecPfi .globl _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i .type _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i, @function _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi 
call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10vector_addPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i .globl _Z10vector_addPfS_S_i .type _Z10vector_addPfS_S_i, @function _Z10vector_addPfS_S_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z10vector_addPfS_S_i, .-_Z10vector_addPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/PawseySC/Introductory-Supercomputing/master/vector_add_gpu/gpu_vector_add.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "Malloc error (%s:%d): %i\n" .LC3: .string "CUDA error %d (%s:%d): %s\n" .LC5: .string "Sum is not correct.\n" .LC6: .string "All good.\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $8000, %edi call malloc@PLT movq %rax, %rbp movl $8000, %edi call malloc@PLT movq %rax, %rbx movl $8000, %edi call malloc@PLT movq %rax, %r12 testq %rbp, %rbp setne %dl testq %rbx, %rbx setne %al testb %al, %dl je .L20 testq %r12, %r12 je .L20 movl $2000, %esi movq %rbp, %rdi call _Z8init_vecPfi 
movl $2000, %esi movq %rbx, %rdi call _Z8init_vecPfi leaq 8(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L47 leaq 16(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L48 leaq 24(%rsp), %rdi movl $8000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L49 movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L50 movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L51 movl $1024, 44(%rsp) movl $1, 48(%rsp) movl $2, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L52 .L27: call cudaGetLastError@PLT testl %eax, %eax jne .L53 call cudaDeviceSynchronize@PLT testl %eax, %eax jne .L54 movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L55 call cudaDeviceSynchronize@PLT testl %eax, %eax jne .L56 movl $0, %eax movss .LC4(%rip), %xmm3 movaps %xmm3, %xmm2 jmp .L31 .L20: movl $0, %r9d movl $54, %r8d leaq .LC1(%rip), %rcx leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L47: leaq 8(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $58, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L48: .cfi_restore_state leaq 16(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $59, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L49: .cfi_restore_state leaq 24(%rsp), %rbp movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $8000, %esi movq %rbp, %rdi call cudaMalloc@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $60, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L50: .cfi_restore_state movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $1, %ecx movl $8000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $61, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L51: .cfi_restore_state movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbp movl $1, %ecx movl $8000, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbp .cfi_def_cfa_offset 112 movl $62, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L52: .cfi_restore_state movl $2000, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z35__device_stub__Z10vector_addPfS_S_iPfS_S_i jmp .L27 .L53: call cudaGetLastError@PLT movl %eax, %edi call 
cudaGetErrorString@PLT movq %rax, %rbx call cudaGetLastError@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $66, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L54: .cfi_restore_state call cudaDeviceSynchronize@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx call cudaDeviceSynchronize@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $67, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L55: .cfi_restore_state movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movl $2, %ecx movl $8000, %edx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $68, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L56: .cfi_restore_state call cudaDeviceSynchronize@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx call cudaDeviceSynchronize@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $69, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L45: .cfi_restore_state subss %xmm1, %xmm0 comiss %xmm0, %xmm3 jb .L35 .L34: addq $4, %rax cmpq $8000, %rax je .L57 .L31: movss 0(%rbp,%rax), %xmm0 addss (%rbx,%rax), %xmm0 movss (%r12,%rax), %xmm1 comiss %xmm0, %xmm1 jbe .L45 subss %xmm0, %xmm1 comiss %xmm1, %xmm2 jnb .L34 .L35: leaq .LC5(%rip), 
%rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movl $1, %eax .L19: movq 56(%rsp), %rdx subq %fs:40, %rdx jne .L58 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L57: .cfi_restore_state movq 8(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L59 movq 16(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L60 movq 24(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L61 movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %eax jmp .L19 .L59: movq 8(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 8(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $85, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: .cfi_restore_state movq 16(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 16(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx .cfi_def_cfa_offset 112 movl $86, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L61: .cfi_restore_state movq 24(%rsp), %rdi call cudaFree@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rbx movq 24(%rsp), %rdi call cudaFree@PLT movl %eax, %ecx subq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 104 pushq %rbx 
.cfi_def_cfa_offset 112 movl $87, %r9d leaq .LC1(%rip), %r8 leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L58: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z10vector_addPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z10vector_addPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1051038895 .align 4 .LC4: .long 869711765 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "gpu_vector_add.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8init_vecPfi .LCPI0_0: .long 0x3ea594af # float 0.323399991 .text .globl _Z8init_vecPfi .p2align 4, 0x90 .type _Z8init_vecPfi,@function _Z8init_vecPfi: # @_Z8init_vecPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq $37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI0_0(%rip), %xmm0 movss %xmm0, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB0_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge retq .Lfunc_end0: .size _Z8init_vecPfi, .Lfunc_end0-_Z8init_vecPfi .cfi_endproc # -- End function .globl _Z25__device_stub__vector_addPfS_S_i # -- Begin function _Z25__device_stub__vector_addPfS_S_i .p2align 4, 0x90 .type _Z25__device_stub__vector_addPfS_S_i,@function _Z25__device_stub__vector_addPfS_S_i: # @_Z25__device_stub__vector_addPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 
movl $_Z10vector_addPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z25__device_stub__vector_addPfS_S_i, .Lfunc_end1-_Z25__device_stub__vector_addPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI2_0: .long 0x3ea594af # float 0.323399991 .LCPI2_1: .long 0x33d6bf95 # float 1.00000001E-7 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %rbx movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %r14 movl $8000, %edi # imm = 0x1F40 callq malloc movq %rax, %r15 testq %rbx, %rbx setne %cl testq %r14, %r14 setne %al andb %cl, %al cmpb $1, %al jne .LBB2_41 # %bb.1: testq %r15, %r15 je .LBB2_41 # %bb.2: # %.lr.ph.i.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq $37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r12,4) incq %r12 cmpq $2000, %r12 # imm = 0x7D0 jne .LBB2_3 # %bb.4: # %.lr.ph.i85.preheader xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_5: # %.lr.ph.i85 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1374389535, %rax, %rcx # imm = 0x51EB851F movq %rcx, %rdx shrq $63, %rdx sarq 
$37, %rcx addl %edx, %ecx imull $100, %ecx, %ecx subl %ecx, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI2_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq $2000, %r12 # imm = 0x7D0 jne .LBB2_5 # %bb.6: # %_Z8init_vecPfi.exit89 leaq 24(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_7 # %bb.8: leaq 16(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_9 # %bb.10: leaq 8(%rsp), %rdi movl $8000, %esi # imm = 0x1F40 callq hipMalloc testl %eax, %eax jne .LBB2_11 # %bb.12: movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_13 # %bb.14: movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_15 # %bb.16: movabsq $4294967298, %rdi # imm = 0x100000002 leaq 1022(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_18 # %bb.17: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $2000, 36(%rsp) # imm = 0x7D0 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10vector_addPfS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_18: callq hipGetLastError testl %eax, %eax jne .LBB2_19 # %bb.21: callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_22 # %bb.23: movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy testl 
%eax, %eax jne .LBB2_24 # %bb.25: callq hipDeviceSynchronize testl %eax, %eax jne .LBB2_27 # %bb.26: # %.preheader.preheader xorl %r12d, %r12d movss .LCPI2_1(%rip), %xmm3 # xmm3 = mem[0],zero,zero,zero xorl %r13d, %r13d xorl %eax, %eax .p2align 4, 0x90 .LBB2_29: # %.preheader # =>This Inner Loop Header: Depth=1 movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss (%rbx,%r12,4), %xmm4 # xmm4 = mem[0],zero,zero,zero addss (%r14,%r12,4), %xmm4 movaps %xmm0, %xmm1 subss %xmm4, %xmm1 movaps %xmm4, %xmm2 subss %xmm0, %xmm2 cmpltss %xmm0, %xmm4 andps %xmm4, %xmm1 andnps %xmm2, %xmm4 orps %xmm1, %xmm4 ucomiss %xmm4, %xmm3 jb .LBB2_30 # %bb.31: # in Loop: Header=BB2_29 Depth=1 ucomiss %xmm4, %xmm3 jb .LBB2_32 .LBB2_28: # in Loop: Header=BB2_29 Depth=1 cmpq $1999, %r12 # imm = 0x7CF leaq 1(%r12), %rcx setae %r13b movq %rcx, %r12 cmpq $2000, %rcx # imm = 0x7D0 jne .LBB2_29 jmp .LBB2_32 .LBB2_30: # in Loop: Header=BB2_29 Depth=1 movq stderr(%rip), %rcx movl $.L.str.3, %edi movl $20, %esi movl $1, %edx movaps %xmm4, 144(%rsp) # 16-byte Spill callq fwrite@PLT movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movaps 144(%rsp), %xmm4 # 16-byte Reload movss .LCPI2_1(%rip), %xmm3 # xmm3 = mem[0],zero,zero,zero movl $1, %eax ucomiss %xmm4, %xmm3 jae .LBB2_28 .LBB2_32: testb $1, %r13b je .LBB2_40 # %bb.33: movq 24(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_34 # %bb.35: movq 16(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_36 # %bb.37: movq 8(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_38 # %bb.39: movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movl $.Lstr, %edi callq puts@PLT xorl %eax, %eax .LBB2_40: addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 
.cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_41: .cfi_def_cfa_offset 224 testq %r15, %r15 setne %cl movq stderr(%rip), %rdi andb %cl, %al movzbl %al, %r8d movl $.L.str, %esi movl $.L.str.1, %edx movl $56, %ecx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .LBB2_7: movq stderr(%rip), %rbx leaq 24(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $60, %r8d jmp .LBB2_20 .LBB2_9: movq stderr(%rip), %rbx leaq 16(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $61, %r8d jmp .LBB2_20 .LBB2_11: movq stderr(%rip), %rbx leaq 8(%rsp), %r14 movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %ebp movq %r14, %rdi callq _ZL9hipMallocIfE10hipError_tPPT_m movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $62, %r8d jmp .LBB2_20 .LBB2_13: movq stderr(%rip), %r14 movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %ebp movq 24(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %r14, %rdi movl %ebp, %edx movl $63, %r8d jmp .LBB2_20 .LBB2_15: movq stderr(%rip), %rbx movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %ebp movq 16(%rsp), %rdi movl $8000, %edx # imm = 0x1F40 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, 
%edx movl $64, %r8d jmp .LBB2_20 .LBB2_19: movq stderr(%rip), %rbx callq hipGetLastError movl %eax, %ebp callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $68, %r8d jmp .LBB2_20 .LBB2_22: movq stderr(%rip), %rbx callq hipDeviceSynchronize movl %eax, %ebp callq hipDeviceSynchronize movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $69, %r8d jmp .LBB2_20 .LBB2_24: movq stderr(%rip), %rbx movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy movl %eax, %ebp movq 8(%rsp), %rsi movl $8000, %edx # imm = 0x1F40 movq %r15, %rdi movl $2, %ecx callq hipMemcpy movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $70, %r8d jmp .LBB2_20 .LBB2_27: movq stderr(%rip), %rbx callq hipDeviceSynchronize movl %eax, %ebp callq hipDeviceSynchronize movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $71, %r8d jmp .LBB2_20 .LBB2_34: movq stderr(%rip), %rbx movq 24(%rsp), %rdi callq hipFree movl %eax, %ebp movq 24(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $87, %r8d jmp .LBB2_20 .LBB2_36: movq stderr(%rip), %rbx movq 16(%rsp), %rdi callq hipFree movl %eax, %ebp movq 16(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $88, %r8d jmp .LBB2_20 .LBB2_38: movq stderr(%rip), %rbx movq 8(%rsp), %rdi callq hipFree movl %eax, %ebp movq 8(%rsp), %rdi callq hipFree movl %eax, %edi callq hipGetErrorString movl $.L.str.2, %esi movl $.L.str.1, %ecx movq %rbx, %rdi movl %ebp, %edx movl $89, %r8d .LBB2_20: movq %rax, %r9 xorl %eax, %eax callq fprintf movl $1, %edi callq exit 
.Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZL9hipMallocIfE10hipError_tPPT_m .type _ZL9hipMallocIfE10hipError_tPPT_m,@function _ZL9hipMallocIfE10hipError_tPPT_m: # @_ZL9hipMallocIfE10hipError_tPPT_m .cfi_startproc # %bb.0: movl $8000, %esi # imm = 0x1F40 jmp hipMalloc # TAILCALL .Lfunc_end3: .size _ZL9hipMallocIfE10hipError_tPPT_m, .Lfunc_end3-_ZL9hipMallocIfE10hipError_tPPT_m .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10vector_addPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z10vector_addPfS_S_i,@object # @_Z10vector_addPfS_S_i .section .rodata,"a",@progbits .globl _Z10vector_addPfS_S_i .p2align 3, 0x0 _Z10vector_addPfS_S_i: .quad _Z25__device_stub__vector_addPfS_S_i .size _Z10vector_addPfS_S_i, 8 .type .L.str,@object # 
@.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Malloc error (%s:%d): %i\n" .size .L.str, 26 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/PawseySC/Introductory-Supercomputing/master/vector_add_gpu/gpu_vector_add.hip" .size .L.str.1, 135 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "CUDA error %d (%s:%d): %s\n" .size .L.str.2, 27 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Sum is not correct.\n" .size .L.str.3, 21 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10vector_addPfS_S_i" .size .L__unnamed_1, 22 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "All good." .size .Lstr, 10 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__vector_addPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10vector_addPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <random> #include <sstream> #include <cassert> #include <fstream> #include <cfloat> #include <cstdlib> #include <iostream> #include <string> #include <ctime> #include <queue> #include <pthread.h> #include <png.h> #include "tbb/concurrent_queue.h" #include <chrono> #include <cuda.h> #include <cuda_runtime.h> using namespace std; using namespace tbb; #define BLOCKS_PER_GRID 32 #define THREADS_PER_BLOCK 32 clock_t start, stop; //Global constants #define t_step 1e-7 #define queue_size 25 static const int iters = 800; static const int steps_per_frame = 2000; static const double delta_per_step = 1e-5; static const double delta_minimum = 1e-7; static const double t_start = -3.0; static const double t_end = 3.0; static const int fad_speed = 10; static std::mt19937 rand_gen; static const float dot_sizes[3] = { 1.0f, 3.0f, 10.0f }; static const int num_params = 18; double params[num_params]; // 18 //Global variables static int window_w = 1600; static int window_h = 900; static int window_bits = 24; static float plot_scale = 0.25f; static float plot_x = 0.0f; static float plot_y = 0.0f; // thread constants int num_computing_threads = 2; int num_io_threads = 6; // int start_points[6] = {0, 1, 2, 3, 4, 5}; // int start_points[3] = {0, 2, 4}; // start from [i] - 3; int start_points[2] = {0, 3}; int each_thread_step = 3;//int(6/num_computing_threads); // -3 ~ 3 / num_computing_therads int io_point[6] = {0, 1, 2, 3, 4, 5}; int computing_to_io_ratio = 3; // 1 computing thread maps to 3 io thread struct Color{ int r; int g; int b; }; struct Vector2f{ double x; double y; } ; struct Vertex{ Vector2f position; Color color; }; struct raw_vector2f{ double* xs; double* ys; }; struct V{ vector<Vertex> vertex_array; double t; }; // queue<V> vertex_array_queue[6]; concurrent_queue<V> vertex_array_queue[6]; inline void e(cudaError_t err, const char* file, int line) { if (err != cudaSuccess) { printf("Error in %s at line %d:\n\t%s\n", file, line, 
cudaGetErrorString(err)); exit(EXIT_FAILURE); } } #define HANDLE_ERROR(err) ( e(err, __FILE__, __LINE__) ) static Color GetRandColor(int i) { i += 1; int r = std::min(255, 50 + (i * 11909) % 256); int g = std::min(255, 50 + (i * 52973) % 256); int b = std::min(255, 50 + (i * 44111) % 256); return Color{r, g, b}; } static void ResetPlot() { plot_scale = 0.25f; plot_x = 0.0f; plot_y = 0.0f; } __device__ void ToScreen(Vector2f& screenPt) { const float s = 0.25f * 1600.0 / 2.0; const float nx = 1600.0 * 0.5f + (float(screenPt.x) - 0.0) * s; const float ny = 900.0 * 0.5f + (float(screenPt.y) - 0.0) * s; screenPt.x = nx; screenPt.y = ny; // return Vector2f{nx, ny}; } static void RandParams(double* params) { params[ 0] = 1; params[ 1] = 0; params[ 2] = 0; params[ 3] = 0; params[ 4] =-1; params[ 5] = 1; params[ 6] =-1; params[ 7] = 0; params[ 8] = 0; params[ 9] = 0; params[10] =-1; params[11] =-1; params[12] =-1; params[13] =-1; params[14] =-1; params[15] = 0; params[16] =-1; params[17] = 0; } void write_png(const char* filename, const int width, const int height, const int* imageR, const int* imageG, const int* imageB) { FILE* fp = fopen(filename, "wb"); assert(fp); png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); assert(png_ptr); png_infop info_ptr = png_create_info_struct(png_ptr); assert(info_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_write_info(png_ptr, info_ptr); // png_set_compression_level(png_ptr, 0); size_t row_size = 3 * width * sizeof(png_byte); png_bytep row = (png_bytep)malloc(row_size); for (int y = 0; y < height; ++y) { memset(row, 0, row_size); for (int x = 0; x < width; ++x) { png_bytep color = row + x * 3; color[0] = imageR[x + y * window_w]; color[1] = imageG[x + y * window_w]; color[2] = imageB[x + y * window_w]; } png_write_row(png_ptr, row); } free(row); 
png_write_end(png_ptr, NULL); png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } void create_png(vector<Vertex>& vertex_array, double t) { // allocate memory for image size_t image_size = window_w * window_h * sizeof(int); int* imageR = (int*)malloc(image_size); int* imageG = (int*)malloc(image_size); int* imageB = (int*)malloc(image_size); memset(imageR, 0, image_size); memset(imageG, 0, image_size); memset(imageB, 0, image_size); // plot the points for (size_t i = 0; i < vertex_array.size(); ++i) { Vector2f screenPt = vertex_array[i].position; // double Color color = vertex_array[i].color; // int int x = int(screenPt.x); int y = int(screenPt.y); if (screenPt.x > 0.0f && screenPt.y > 0.0f && screenPt.x < window_w && screenPt.y < window_h) { imageR[x + y * window_w] = abs(imageR[x + y * window_w] - color.r); imageG[x + y * window_w] = abs(imageG[x + y * window_w] - color.g); imageB[x + y * window_w] = abs(imageB[x + y * window_w] - color.b); } } // start I/O double file_name_double = (t + 3.0)/t_step; // cout << "filename: " << t << " "; char filename[30]; // sprintf(filename , "./pic/%06d.png" , int(file_name_double)); sprintf(filename, "./pic/%09d.png", int(file_name_double)); //cout << filename << endl; write_png(filename, window_w, window_h, imageR, imageG, imageB); free(imageR); free(imageG); free(imageB); } __global__ void compute_each_step(Vector2f* cuda_vector_array, double T) { // index int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int step = id ; step < 1000; step = step + stride) //steps = 2000 { double t = T + step * 1e-7; // bool isOffScreen = true; double x = t; double y = t; for (int iter = 0; iter < 800; ++iter) // 800 { const double xx = x * x; const double yy = y * y; const double tt = t * t; const double xy = x * y; const double xt = x * t; const double yt = y * t; const double nx = xx * 1 + yy * 0 + tt * 0 + xy * 0 + xt *-1 + yt * 1 + x *-1 + y * 0 + t * 0 ; const double ny = xx * 0 + yy *-1 
+ tt *-1 + xy *-1 + xt *-1 + yt *-1 + x * 0 + y *-1 + t * 0 ; x = nx; y = ny; Vector2f screenPt; screenPt.x = x; screenPt.y = y; ToScreen(screenPt); if (iter < 100) { screenPt.x = FLT_MAX; screenPt.y = FLT_MAX; } cuda_vector_array[step*800 + iter].x = screenPt.x; cuda_vector_array[step*800 + iter].y = screenPt.y; } //iteration end } // step end } void* thread_target(void* arg) { int* start = (int*) arg; int thread_num = int(start[0]); int which_io = 0; int full_hits = 0; // timestep setting double t = double(thread_num) - 3; double local_t_end = t + each_thread_step; // Setup the vertex array V result; result.vertex_array.resize(iters * steps_per_frame); // 800 * 2000 for (size_t i = 0; i < result.vertex_array.size(); ++i) result.vertex_array[i].color = GetRandColor(i % iters); while(t < local_t_end) { // wait for i/o if (vertex_array_queue[thread_num + which_io].unsafe_size() >= queue_size) { // cout << "full hits: " << ++full_hits << " ,which io thread: " << thread_num + which_io << endl; continue; }else { full_hits = 0; } // set GPU id if (thread_num == 0) cudaSetDevice(0); else cudaSetDevice(1); // GPU memory Vector2f* cuda_vector_array; Vector2f* vector_array = (Vector2f*)malloc(iters * steps_per_frame/2 * sizeof(Vector2f)); HANDLE_ERROR( cudaMalloc(&cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f))); /*********************** first round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( cudaGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( cudaDeviceSynchronize()); HANDLE_ERROR( cudaMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), cudaMemcpyDeviceToHost)); // copy data back to result.vertex_array for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[i].position.x = vector_array[i].x; result.vertex_array[i].position.y = 
vector_array[i].y; } t += 1000 * 1e-7; /*********************** secodn round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( cudaGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( cudaDeviceSynchronize()); HANDLE_ERROR( cudaMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), cudaMemcpyDeviceToHost)); // copy data back to result.vertex_array int st = result.vertex_array.size() / 2; for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[st + i].position.x = vector_array[i].x; result.vertex_array[st + i].position.y = vector_array[i].y; } t += 1000 * 1e-7; free(vector_array); // Draw the data // put the vertex array to queue result.t = t; vertex_array_queue[thread_num + which_io].push(result); which_io = (which_io + 1) % computing_to_io_ratio; } // t end cout << "computing thread: " << thread_num << " finished" << endl; result.t = -100; for (int i = 0; i < computing_to_io_ratio; i++) vertex_array_queue[thread_num + i].push(result); pthread_exit(NULL); } void* thread_io_target(void* arg) { int* start = (int*) arg; int io_num = int(start[0]); int empty_hits = 0; // cout << "io thread: " << io_num << " start working" << endl; while (true) { if (vertex_array_queue[io_num].empty()) { // cout << "empty hits: " << ++empty_hits <<", which io: " << io_num << endl; continue; }else { empty_hits = 0; } // take out the first result // V result = vertex_array_queue[io_num].front(); // vertex_array_queue[io_num].pop(); V result; if (!vertex_array_queue[io_num].try_pop(result)) continue; // check if the computing thread finished if (result.t == -100) break; vector<Vertex> vertex_array; vertex_array.resize(result.vertex_array.size()); vertex_array = result.vertex_array; double t = result.t; create_png(vertex_array, t); } cout << "io thread " << io_num << " exits" << endl; 
pthread_exit(NULL); } int main(int argc, char* argv[]) { cout << "start computing........." << endl; chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); start = clock(); rand_gen.seed((unsigned int)time(0)); // Initialize random parameters ResetPlot(); RandParams(params); pthread_t computing_threads[num_computing_threads]; pthread_t io_threads[num_io_threads]; // create computing threads for (int i = 0; i < num_computing_threads; ++i) assert (0 == pthread_create(&computing_threads[i], NULL, thread_target, (void*) &start_points[i])); // create i/o threads for (int i = 0; i < num_io_threads; ++i) assert (0 == pthread_create(&io_threads[i], NULL, thread_io_target, (void*) &io_point[i])); // join computing threads for (int i = 0; i < num_computing_threads; ++i) assert(0 == pthread_join(computing_threads[i], NULL)); // join computing threads for (int i = 0; i < num_io_threads; ++i) assert(0 == pthread_join(io_threads[i], NULL)); stop = clock(); chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); cout << double(stop - start) / CLOCKS_PER_SEC << endl; cout <<"total time: " << chrono::duration_cast<chrono::microseconds>(t2 - t1).count() << " us" << endl; return 0; }
code for sm_80 Function : _Z17compute_each_stepP8Vector2fd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GT.AND P0, PT, R0, 0x3e7, PT ; /* 0x000003e70000780c */ /* 0x000fda0003f04270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ UIADD3 UR4, UP0, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fc8000ff1e03f */ /*0080*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */ /* 0x000fe400087fe43f */ /*0090*/ IMAD.U32 R4, RZ, RZ, UR4 ; /* 0x00000004ff047e24 */ /* 0x000fc8000f8e00ff */ /*00a0*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */ /* 0x000fe2000f8e00ff */ /*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00c0*/ I2F.F64 R6, R0 ; /* 0x0000000000067312 */ /* 0x000e220000201c00 */ /*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, -0x654350b8 ; /* 0x9abcaf48ff027424 */ /* 0x000fe400078e00ff */ /*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x3e7ad7f2 ; /* 0x3e7ad7f2ff037424 */ /* 0x000fe400078e00ff */ /*00f0*/ IMAD R11, R0, 0x320, RZ ; /* 0x00000320000b7824 */ /* 0x000fe400078e02ff */ /*0100*/ IMAD.MOV.U32 R26, RZ, RZ, RZ ; /* 0x000000ffff1a7224 */ /* 0x000fc600078e00ff */ /*0110*/ IADD3 R9, R11.reuse, 0x1, RZ ; /* 0x000000010b097810 */ /* 0x040fe20007ffe0ff */ /*0120*/ IMAD.WIDE R10, R11, 0x10, R4 ; /* 0x000000100b0a7825 */ /* 0x000fe200078e0204 */ /*0130*/ DFMA R6, R6, R2, c[0x0][0x168] ; /* 0x00005a000606762b */ /* 0x0030460000000002 */ /*0140*/ IMAD.WIDE R2, R9, 0x10, R4 ; /* 0x0000001009027825 
*/ /* 0x001fc600078e0204 */ /*0150*/ DMUL R8, R6, R6 ; /* 0x0000000606087228 */ /* 0x0020480000000000 */ /*0160*/ IMAD.MOV.U32 R18, RZ, RZ, R6.reuse ; /* 0x000000ffff127224 */ /* 0x100fe400078e0006 */ /*0170*/ IMAD.MOV.U32 R19, RZ, RZ, R7.reuse ; /* 0x000000ffff137224 */ /* 0x100fe400078e0007 */ /*0180*/ IMAD.MOV.U32 R16, RZ, RZ, R6 ; /* 0x000000ffff107224 */ /* 0x000fe400078e0006 */ /*0190*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */ /* 0x003fe400078e0007 */ /*01a0*/ DMUL R22, R18, R18 ; /* 0x0000001212167228 */ /* 0x000fe20000000000 */ /*01b0*/ ISETP.GE.U32.AND P0, PT, R26, 0x64, PT ; /* 0x000000641a00780c */ /* 0x000fc60003f06070 */ /*01c0*/ DMUL R12, R16, R16 ; /* 0x00000010100c7228 */ /* 0x002e080000000000 */ /*01d0*/ DMUL R14, R16, R18 ; /* 0x00000012100e7228 */ /* 0x000fc80000000000 */ /*01e0*/ DFMA R20, RZ, R12, -R22 ; /* 0x0000000cff14722b */ /* 0x001e080000000816 */ /*01f0*/ DFMA R12, RZ, R22, R12 ; /* 0x00000016ff0c722b */ /* 0x000e48000000000c */ /*0200*/ DADD R20, -R8, R20 ; /* 0x0000000008147229 */ /* 0x001e080000000114 */ /*0210*/ DFMA R12, RZ, R8, R12 ; /* 0x00000008ff0c722b */ /* 0x002e48000000000c */ /*0220*/ DADD R20, -R14, R20 ; /* 0x000000000e147229 */ /* 0x001e080000000114 */ /*0230*/ DFMA R12, RZ, R14, R12 ; /* 0x0000000eff0c722b */ /* 0x002e48000000000c */ /*0240*/ DFMA R20, -R6, R16, R20 ; /* 0x000000100614722b */ /* 0x001e080000000114 */ /*0250*/ DFMA R12, -R6, R16, R12 ; /* 0x00000010060c722b */ /* 0x002e48000000010c */ /*0260*/ DFMA R20, -R6, R18, R20 ; /* 0x000000120614722b */ /* 0x001e080000000114 */ /*0270*/ DFMA R12, R6, R18, R12 ; /* 0x00000012060c722b */ /* 0x002e48000000000c */ /*0280*/ DFMA R20, RZ, R16, R20 ; /* 0x00000010ff14722b */ /* 0x001e080000000014 */ /*0290*/ DADD R12, R12, -R16 ; /* 0x000000000c0c7229 */ /* 0x002e480000000810 */ /*02a0*/ DADD R20, R20, -R18 ; /* 0x0000000014147229 */ /* 0x001e080000000812 */ /*02b0*/ DFMA R12, RZ, R18, R12 ; /* 0x00000012ff0c722b */ /* 0x002e48000000000c */ /*02c0*/ DFMA 
R16, RZ, R6, R20 ; /* 0x00000006ff10722b */ /* 0x001e080000000014 */ /*02d0*/ DFMA R18, RZ, R6, R12 ; /* 0x00000006ff12722b */ /* 0x0022a4000000000c */ /*02e0*/ F2F.F32.F64 R20, R16 ; /* 0x0000001000147310 */ /* 0x001e220000301000 */ /*02f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */ /* 0x002fe400078e00ff */ /*0300*/ IMAD.MOV.U32 R13, RZ, RZ, 0x40690000 ; /* 0x40690000ff0d7424 */ /* 0x000fca00078e00ff */ /*0310*/ F2F.F32.F64 R27, R18 ; /* 0x00000012001b7310 */ /* 0x004e700000301000 */ /*0320*/ F2F.F64.F32 R20, R20 ; /* 0x0000001400147310 */ /* 0x001e300000201800 */ /*0330*/ F2F.F64.F32 R14, R27 ; /* 0x0000001b000e7310 */ /* 0x002e620000201800 */ /*0340*/ DFMA R24, R20, R12, 450 ; /* 0x407c20001418742b */ /* 0x001e0e000000000c */ /*0350*/ F2F.F32.F64 R21, R24 ; /* 0x0000001800157310 */ /* 0x0010a20000301000 */ /*0360*/ DFMA R22, R14, R12, 800 ; /* 0x408900000e16742b */ /* 0x002e48000000000c */ /*0370*/ DMUL R24, R16, R16 ; /* 0x0000001010187228 */ /* 0x001fcc0000000000 */ /*0380*/ F2F.F32.F64 R22, R22 ; /* 0x0000001600167310 */ /* 0x002e300000301000 */ /*0390*/ F2F.F64.F32 R20, R21 ; /* 0x0000001500147310 */ /* 0x004e700000201800 */ /*03a0*/ F2F.F64.F32 R14, R22 ; /* 0x00000016000e7310 */ /* 0x001e220000201800 */ /*03b0*/ FSEL R28, R20, -3.68934881474191032320e+19, P0 ; /* 0xe0000000141c7808 */ /* 0x002fc40000000000 */ /*03c0*/ FSEL R29, R21, 122879.9921875, P0 ; /* 0x47efffff151d7808 */ /* 0x000fe20000000000 */ /*03d0*/ DMUL R20, R18, R18 ; /* 0x0000001212147228 */ /* 0x000e480000000000 */ /*03e0*/ STG.E.64 [R10.64], R28 ; /* 0x0000001c0a007986 */ /* 0x000fe2000c101b04 */ /*03f0*/ FSEL R14, R14, -3.68934881474191032320e+19, P0 ; /* 0xe00000000e0e7808 */ /* 0x001fe20000000000 */ /*0400*/ DFMA R22, RZ, R24, R20 ; /* 0x00000018ff16722b */ /* 0x002fe20000000014 */ /*0410*/ FSEL R15, R15, 122879.9921875, P0 ; /* 0x47efffff0f0f7808 */ /* 0x000fc60000000000 */ /*0420*/ DFMA R24, RZ, R20, -R24 ; /* 0x00000014ff18722b */ /* 0x000e240000000818 */ /*0430*/ 
STG.E.64 [R10.64+-0x8], R14 ; /* 0xfffff80e0a007986 */ /* 0x0003e4000c101b04 */ /*0440*/ DMUL R20, R18, R16 ; /* 0x0000001012147228 */ /* 0x000fc80000000000 */ /*0450*/ DADD R24, -R8, R24 ; /* 0x0000000008187229 */ /* 0x001e080000000118 */ /*0460*/ DFMA R22, RZ, R8, R22 ; /* 0x00000008ff16722b */ /* 0x000e880000000016 */ /*0470*/ DADD R24, -R20, R24 ; /* 0x0000000014187229 */ /* 0x001e220000000118 */ /*0480*/ IMAD.MOV.U32 R14, RZ, RZ, R2 ; /* 0x000000ffff0e7224 */ /* 0x002fe200078e0002 */ /*0490*/ IADD3 R10, P1, R10, 0x40, RZ ; /* 0x000000400a0a7810 */ /* 0x000fe20007f3e0ff */ /*04a0*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */ /* 0x000fe200078e0003 */ /*04b0*/ DFMA R22, RZ, R20, R22 ; /* 0x00000014ff16722b */ /* 0x004e460000000016 */ /*04c0*/ IMAD.X R11, RZ, RZ, R11, P1 ; /* 0x000000ffff0b7224 */ /* 0x000fe200008e060b */ /*04d0*/ DFMA R20, -R6, R18, R24 ; /* 0x000000120614722b */ /* 0x001e080000000118 */ /*04e0*/ DFMA R22, -R6, R18, R22 ; /* 0x000000120616722b */ /* 0x002e480000000116 */ /*04f0*/ DFMA R20, -R6, R16, R20 ; /* 0x000000100614722b */ /* 0x001e080000000114 */ /*0500*/ DFMA R22, R6, R16, R22 ; /* 0x000000100616722b */ /* 0x002e480000000016 */ /*0510*/ DFMA R20, RZ, R18, R20 ; /* 0x00000012ff14722b */ /* 0x001e080000000014 */ /*0520*/ DADD R22, -R18, R22 ; /* 0x0000000012167229 */ /* 0x002e480000000116 */ /*0530*/ DADD R20, -R16, R20 ; /* 0x0000000010147229 */ /* 0x001e080000000114 */ /*0540*/ DFMA R22, RZ, R16, R22 ; /* 0x00000010ff16722b */ /* 0x002e480000000016 */ /*0550*/ DFMA R18, RZ, R6, R20 ; /* 0x00000006ff12722b */ /* 0x001e080000000014 */ /*0560*/ DFMA R22, RZ, R6, R22 ; /* 0x00000006ff16722b */ /* 0x002e640000000016 */ /*0570*/ F2F.F32.F64 R25, R18 ; /* 0x0000001200197310 */ /* 0x001e300000301000 */ /*0580*/ F2F.F32.F64 R24, R22 ; /* 0x0000001600187310 */ /* 0x002e700000301000 */ /*0590*/ F2F.F64.F32 R20, R25 ; /* 0x0000001900147310 */ /* 0x001e300000201800 */ /*05a0*/ F2F.F64.F32 R16, R24 ; /* 0x0000001800107310 */ /* 
0x0022a20000201800 */ /*05b0*/ DFMA R20, R20, R12, 450 ; /* 0x407c20001414742b */ /* 0x001e08000000000c */ /*05c0*/ DMUL R24, R22, R22 ; /* 0x0000001616187228 */ /* 0x002fc80000000000 */ /*05d0*/ DFMA R28, R16, R12, 800 ; /* 0x40890000101c742b */ /* 0x004e64000000000c */ /*05e0*/ F2F.F32.F64 R20, R20 ; /* 0x0000001400147310 */ /* 0x0010b00000301000 */ /*05f0*/ F2F.F32.F64 R27, R28 ; /* 0x0000001c001b7310 */ /* 0x0022e20000301000 */ /*0600*/ IADD3 R21, R26, 0x1, RZ ; /* 0x000000011a157810 */ /* 0x001fc80007ffe0ff */ /*0610*/ ISETP.GE.U32.AND P0, PT, R21, 0x64, PT ; /* 0x000000641500780c */ /* 0x000fc60003f06070 */ /*0620*/ F2F.F64.F32 R16, R20 ; /* 0x0000001400107310 */ /* 0x004e220000201800 */ /*0630*/ DMUL R28, R18, R18 ; /* 0x00000012121c7228 */ /* 0x002e4e0000000000 */ /*0640*/ F2F.F64.F32 R20, R27 ; /* 0x0000001b00147310 */ /* 0x008ea20000201800 */ /*0650*/ FSEL R16, R16, -3.68934881474191032320e+19, P0 ; /* 0xe000000010107808 */ /* 0x001fe40000000000 */ /*0660*/ FSEL R17, R17, 122879.9921875, P0 ; /* 0x47efffff11117808 */ /* 0x000fca0000000000 */ /*0670*/ STG.E.64 [R14.64], R16 ; /* 0x000000100e007986 */ /* 0x0001e2000c101b04 */ /*0680*/ FSEL R2, R20, -3.68934881474191032320e+19, P0 ; /* 0xe000000014027808 */ /* 0x004fe40000000000 */ /*0690*/ FSEL R3, R21, 122879.9921875, P0 ; /* 0x47efffff15037808 */ /* 0x000fe20000000000 */ /*06a0*/ DFMA R20, RZ, R28, R24 ; /* 0x0000001cff14722b */ /* 0x002e480000000018 */ /*06b0*/ DFMA R28, RZ, R24, -R28 ; /* 0x00000018ff1c722b */ /* 0x000ea2000000081c */ /*06c0*/ STG.E.64 [R14.64+-0x8], R2 ; /* 0xfffff8020e007986 */ /* 0x000fe6000c101b04 */ /*06d0*/ DFMA R24, RZ, R8, R20 ; /* 0x00000008ff18722b */ /* 0x002fc80000000014 */ /*06e0*/ DADD R20, -R8, R28 ; /* 0x0000000008147229 */ /* 0x004fc8000000011c */ /*06f0*/ DMUL R28, R22, R18 ; /* 0x00000012161c7228 */ /* 0x000e4c0000000000 */ /*0700*/ DADD R20, -R28, R20 ; /* 0x000000001c147229 */ /* 0x002e480000000114 */ /*0710*/ DFMA R24, RZ, R28, R24 ; /* 0x0000001cff18722b */ /* 
0x000e880000000018 */ /*0720*/ DFMA R20, -R6, R22, R20 ; /* 0x000000160614722b */ /* 0x002e480000000114 */ /*0730*/ DFMA R24, -R6, R22, R24 ; /* 0x000000160618722b */ /* 0x004e880000000118 */ /*0740*/ DFMA R20, -R6, R18, R20 ; /* 0x000000120614722b */ /* 0x002e480000000114 */ /*0750*/ DFMA R24, R6, R18, R24 ; /* 0x000000120618722b */ /* 0x004e880000000018 */ /*0760*/ DFMA R20, RZ, R22, R20 ; /* 0x00000016ff14722b */ /* 0x002e080000000014 */ /*0770*/ DADD R24, -R22, R24 ; /* 0x0000000016187229 */ /* 0x004e480000000118 */ /*0780*/ DADD R16, -R18, R20 ; /* 0x0000000012107229 */ /* 0x001e080000000114 */ /*0790*/ DFMA R18, RZ, R18, R24 ; /* 0x00000012ff12722b */ /* 0x002e480000000018 */ /*07a0*/ DFMA R16, RZ, R6, R16 ; /* 0x00000006ff10722b */ /* 0x001e080000000010 */ /*07b0*/ DFMA R18, RZ, R6, R18 ; /* 0x00000006ff12722b */ /* 0x002e640000000012 */ /*07c0*/ F2F.F32.F64 R22, R16 ; /* 0x0000001000167310 */ /* 0x001e300000301000 */ /*07d0*/ F2F.F32.F64 R27, R18 ; /* 0x00000012001b7310 */ /* 0x002e700000301000 */ /*07e0*/ F2F.F64.F32 R22, R22 ; /* 0x0000001600167310 */ /* 0x001e300000201800 */ /*07f0*/ F2F.F64.F32 R20, R27 ; /* 0x0000001b00147310 */ /* 0x002e620000201800 */ /*0800*/ DFMA R24, R22, R12, 450 ; /* 0x407c20001618742b */ /* 0x001e0e000000000c */ /*0810*/ F2F.F32.F64 R23, R24 ; /* 0x0000001800177310 */ /* 0x001e220000301000 */ /*0820*/ DFMA R28, R20, R12, 800 ; /* 0x40890000141c742b */ /* 0x0022a4000000000c */ /*0830*/ IADD3 R20, R26, 0x2, RZ ; /* 0x000000021a147810 */ /* 0x002fc80007ffe0ff */ /*0840*/ ISETP.GE.U32.AND P0, PT, R20, 0x64, PT ; /* 0x000000641400780c */ /* 0x000fc80003f06070 */ /*0850*/ F2F.F32.F64 R28, R28 ; /* 0x0000001c001c7310 */ /* 0x004e700000301000 */ /*0860*/ F2F.F64.F32 R22, R23 ; /* 0x0000001700167310 */ /* 0x001e300000201800 */ /*0870*/ F2F.F64.F32 R20, R28 ; /* 0x0000001c00147310 */ /* 0x002e620000201800 */ /*0880*/ FSEL R24, R22, -3.68934881474191032320e+19, P0 ; /* 0xe000000016187808 */ /* 0x001fc40000000000 */ /*0890*/ FSEL R25, R23, 
122879.9921875, P0 ; /* 0x47efffff17197808 */ /* 0x000fe20000000000 */ /*08a0*/ DMUL R22, R18, R18 ; /* 0x0000001212167228 */ /* 0x000fc80000000000 */ /*08b0*/ STG.E.64 [R14.64+0x10], R24 ; /* 0x000010180e007986 */ /* 0x0001e2000c101b04 */ /*08c0*/ FSEL R20, R20, -3.68934881474191032320e+19, P0 ; /* 0xe000000014147808 */ /* 0x002fe40000000000 */ /*08d0*/ FSEL R21, R21, 122879.9921875, P0 ; /* 0x47efffff15157808 */ /* 0x000fca0000000000 */ /*08e0*/ STG.E.64 [R14.64+0x8], R20 ; /* 0x000008140e007986 */ /* 0x0003e2000c101b04 */ /*08f0*/ DMUL R24, R16, R16 ; /* 0x0000001010187228 */ /* 0x001e0c0000000000 */ /*0900*/ DFMA R28, RZ, R24, R22 ; /* 0x00000018ff1c722b */ /* 0x001e080000000016 */ /*0910*/ DFMA R22, RZ, R22, -R24 ; /* 0x00000016ff16722b */ /* 0x000e880000000818 */ /*0920*/ DFMA R28, RZ, R8, R28 ; /* 0x00000008ff1c722b */ /* 0x001fc8000000001c */ /*0930*/ DADD R24, -R8, R22 ; /* 0x0000000008187229 */ /* 0x004fc80000000116 */ /*0940*/ DMUL R22, R18, R16 ; /* 0x0000001012167228 */ /* 0x000e0c0000000000 */ /*0950*/ DADD R2, -R22, R24 ; /* 0x0000000016027229 */ /* 0x001e080000000118 */ /*0960*/ DFMA R28, RZ, R22, R28 ; /* 0x00000016ff1c722b */ /* 0x000e88000000001c */ /*0970*/ DFMA R2, -R6, R18, R2 ; /* 0x000000120602722b */ /* 0x001e080000000102 */ /*0980*/ DFMA R28, -R6, R18, R28 ; /* 0x00000012061c722b */ /* 0x004e88000000011c */ /*0990*/ DFMA R2, -R6, R16, R2 ; /* 0x000000100602722b */ /* 0x001e080000000102 */ /*09a0*/ DFMA R28, R6, R16, R28 ; /* 0x00000010061c722b */ /* 0x004e88000000001c */ /*09b0*/ DFMA R2, RZ, R18, R2 ; /* 0x00000012ff02722b */ /* 0x001e080000000002 */ /*09c0*/ DADD R28, -R18, R28 ; /* 0x00000000121c7229 */ /* 0x004e88000000011c */ /*09d0*/ DADD R2, -R16, R2 ; /* 0x0000000010027229 */ /* 0x001e080000000102 */ /*09e0*/ DFMA R16, RZ, R16, R28 ; /* 0x00000010ff10722b */ /* 0x0044e4000000001c */ /*09f0*/ IADD3 R29, R26.reuse, 0x3, RZ ; /* 0x000000031a1d7810 */ /* 0x044fe40007ffe0ff */ /*0a00*/ DFMA R18, RZ, R6.reuse, R2 ; /* 0x00000006ff12722b 
*/ /* 0x081e220000000002 */ /*0a10*/ IADD3 R26, R26, 0x4, RZ ; /* 0x000000041a1a7810 */ /* 0x000fe40007ffe0ff */ /*0a20*/ ISETP.GE.U32.AND P0, PT, R29, 0x64, PT ; /* 0x000000641d00780c */ /* 0x000fe20003f06070 */ /*0a30*/ DFMA R16, RZ, R6, R16 ; /* 0x00000006ff10722b */ /* 0x008ea40000000010 */ /*0a40*/ F2F.F32.F64 R28, R18 ; /* 0x00000012001c7310 */ /* 0x001e300000301000 */ /*0a50*/ F2F.F32.F64 R27, R16 ; /* 0x00000010001b7310 */ /* 0x004eb00000301000 */ /*0a60*/ F2F.F64.F32 R22, R28 ; /* 0x0000001c00167310 */ /* 0x001e300000201800 */ /*0a70*/ F2F.F64.F32 R2, R27 ; /* 0x0000001b00027310 */ /* 0x004ea20000201800 */ /*0a80*/ DFMA R22, R22, R12, 450 ; /* 0x407c20001616742b */ /* 0x001e08000000000c */ /*0a90*/ DFMA R2, R2, R12, 800 ; /* 0x408900000202742b */ /* 0x004e8c000000000c */ /*0aa0*/ F2F.F32.F64 R22, R22 ; /* 0x0000001600167310 */ /* 0x001e300000301000 */ /*0ab0*/ F2F.F32.F64 R2, R2 ; /* 0x0000000200027310 */ /* 0x004eb00000301000 */ /*0ac0*/ F2F.F64.F32 R24, R22 ; /* 0x0000001600187310 */ /* 0x001e300000201800 */ /*0ad0*/ F2F.F64.F32 R12, R2 ; /* 0x00000002000c7310 */ /* 0x0044e20000201800 */ /*0ae0*/ FSEL R24, R24, -3.68934881474191032320e+19, P0 ; /* 0xe000000018187808 */ /* 0x001fc40000000000 */ /*0af0*/ FSEL R25, R25, 122879.9921875, P0 ; /* 0x47efffff19197808 */ /* 0x000fe40000000000 */ /*0b00*/ IADD3 R2, P2, R14, 0x40, RZ ; /* 0x000000400e027810 */ /* 0x004fc60007f5e0ff */ /*0b10*/ STG.E.64 [R14.64+0x20], R24 ; /* 0x000020180e007986 */ /* 0x0003e2000c101b04 */ /*0b20*/ FSEL R12, R12, -3.68934881474191032320e+19, P0 ; /* 0xe00000000c0c7808 */ /* 0x008fe20000000000 */ /*0b30*/ IMAD.X R3, RZ, RZ, R15, P2 ; /* 0x000000ffff037224 */ /* 0x000fe200010e060f */ /*0b40*/ FSEL R13, R13, 122879.9921875, P0 ; /* 0x47efffff0d0d7808 */ /* 0x000fe40000000000 */ /*0b50*/ ISETP.NE.AND P0, PT, R26, 0x320, PT ; /* 0x000003201a00780c */ /* 0x000fc60003f05270 */ /*0b60*/ STG.E.64 [R14.64+0x18], R12 ; /* 0x0000180c0e007986 */ /* 0x0003f4000c101b04 */ /*0b70*/ @P0 BRA 0x1a0 ; 
/* 0xfffff62000000947 */ /* 0x000fea000383ffff */ /*0b80*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff037624 */ /* 0x000fc800078e00ff */ /*0b90*/ IMAD R0, R3, c[0x0][0xc], R0 ; /* 0x0000030003007a24 */ /* 0x000fca00078e0200 */ /*0ba0*/ ISETP.GE.AND P0, PT, R0, 0x3e8, PT ; /* 0x000003e80000780c */ /* 0x000fda0003f06270 */ /*0bb0*/ @!P0 BRA 0xc0 ; /* 0xfffff50000008947 */ /* 0x000fea000383ffff */ /*0bc0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0bd0*/ BRA 0xbd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0be0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// ---------------------------------------------------------------------------
// Chaos-equation plotter.
//
// Structure: `num_computing_threads` pthreads each own a GPU and sweep a
// sub-interval of t in [-3, 3]. For every frame they launch the
// `compute_each_step` kernel twice (1000 timesteps per launch, 800 orbit
// iterations per timestep), copy the resulting screen-space points back,
// and push a vertex array into one of several tbb::concurrent_queue slots.
// `num_io_threads` pthreads pop frames from those queues and rasterize them
// into PNG files under ./pic/.
// ---------------------------------------------------------------------------
#include <iostream>
#include <random>
#include <sstream>
#include <cassert>
#include <fstream>
#include <cfloat>
#include <cstdlib>
#include <string>
#include <ctime>
#include <queue>
#include <pthread.h>
#include <png.h>
#include "tbb/concurrent_queue.h"
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>

using namespace std;
using namespace tbb;

#define BLOCKS_PER_GRID 32
#define THREADS_PER_BLOCK 32

clock_t start, stop;

// Global constants
#define t_step 1e-7
#define queue_size 25                       // max frames buffered per I/O queue
static const int iters = 800;               // orbit iterations per timestep
static const int steps_per_frame = 2000;    // timesteps accumulated per frame
static const double delta_per_step = 1e-5;
static const double delta_minimum = 1e-7;
static const double t_start = -3.0;
static const double t_end = 3.0;
static const int fad_speed = 10;
static std::mt19937 rand_gen;
static const float dot_sizes[3] = { 1.0f, 3.0f, 10.0f };
static const int num_params = 18;
double params[num_params]; // 18 coefficients of the chaos equation

// Global variables
static int window_w = 1600;
static int window_h = 900;
static int window_bits = 24;
static float plot_scale = 0.25f;
static float plot_x = 0.0f;
static float plot_y = 0.0f;

// Thread constants.
int num_computing_threads = 2;
int num_io_threads = 6;
// Each computing thread starts its t sweep at start_points[i] - 3.
int start_points[2] = {0, 3};
int each_thread_step = 3; // width of each thread's t interval (6 / num_computing_threads)
int io_point[6] = {0, 1, 2, 3, 4, 5};
int computing_to_io_ratio = 3; // 1 computing thread feeds 3 I/O threads round-robin

struct Color{
    int r;
    int g;
    int b;
};
struct Vector2f{
    double x;
    double y;
};
struct Vertex{
    Vector2f position;
    Color color;
};
struct raw_vector2f{
    double* xs;
    double* ys;
};
// One frame: the vertex array plus the t value it was computed for.
// t == -100 is the sentinel a computing thread pushes to tell its I/O
// threads to exit.
struct V{
    vector<Vertex> vertex_array;
    double t;
};

concurrent_queue<V> vertex_array_queue[6];

// Abort with file/line context if a CUDA runtime call failed.
inline void e(cudaError_t err, const char* file, int line) {
    if (err != cudaSuccess) {
        printf("Error in %s at line %d:\n\t%s\n", file, line,
               cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) ( e(err, __FILE__, __LINE__) )

// Deterministic pseudo-random color for orbit index i (same i -> same color).
static Color GetRandColor(int i) {
    i += 1;
    int r = std::min(255, 50 + (i * 11909) % 256);
    int g = std::min(255, 50 + (i * 52973) % 256);
    int b = std::min(255, 50 + (i * 44111) % 256);
    return Color{r, g, b};
}

static void ResetPlot() {
    plot_scale = 0.25f;
    plot_x = 0.0f;
    plot_y = 0.0f;
}

// Map a point from equation space to pixel coordinates (in place).
// Constants mirror the host-side plot_scale/plot_x/plot_y defaults.
__device__ void ToScreen(Vector2f& screenPt) {
    const float s = 0.25f * 1600.0 / 2.0;
    const float nx = 1600.0 * 0.5f + (float(screenPt.x) - 0.0) * s;
    const float ny = 900.0 * 0.5f + (float(screenPt.y) - 0.0) * s;
    screenPt.x = nx;
    screenPt.y = ny;
}

// Fixed (not actually random) coefficient set for the chaos equation.
static void RandParams(double* params) {
    params[ 0] = 1;  params[ 1] = 0;  params[ 2] = 0;
    params[ 3] = 0;  params[ 4] = -1; params[ 5] = 1;
    params[ 6] = -1; params[ 7] = 0;  params[ 8] = 0;
    params[ 9] = 0;  params[10] = -1; params[11] = -1;
    params[12] = -1; params[13] = -1; params[14] = -1;
    params[15] = 0;  params[16] = -1; params[17] = 0;
}

// Write an RGB PNG from three per-channel int arrays laid out row-major
// with stride window_w.
void write_png(const char* filename, const int width, const int height,
               const int* imageR, const int* imageG, const int* imageB) {
    FILE* fp = fopen(filename, "wb");
    assert(fp);
    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    assert(png_ptr);
    png_infop info_ptr = png_create_info_struct(png_ptr);
    assert(info_ptr);
    png_init_io(png_ptr, fp);
    png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB,
                 PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
                 PNG_FILTER_TYPE_DEFAULT);
    png_write_info(png_ptr, info_ptr);
    size_t row_size = 3 * width * sizeof(png_byte);
    png_bytep row = (png_bytep)malloc(row_size);
    for (int y = 0; y < height; ++y) {
        memset(row, 0, row_size);
        for (int x = 0; x < width; ++x) {
            png_bytep color = row + x * 3;
            color[0] = imageR[x + y * window_w];
            color[1] = imageG[x + y * window_w];
            color[2] = imageB[x + y * window_w];
        }
        png_write_row(png_ptr, row);
    }
    free(row);
    png_write_end(png_ptr, NULL);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    fclose(fp);
}

// Rasterize one frame's vertex array and write it as ./pic/<index>.png,
// where the index is derived from t so frames sort chronologically.
void create_png(vector<Vertex>& vertex_array, double t) {
    size_t image_size = window_w * window_h * sizeof(int);
    int* imageR = (int*)malloc(image_size);
    int* imageG = (int*)malloc(image_size);
    int* imageB = (int*)malloc(image_size);
    memset(imageR, 0, image_size);
    memset(imageG, 0, image_size);
    memset(imageB, 0, image_size);
    // Plot the points; XOR-like abs-difference blending per channel.
    for (size_t i = 0; i < vertex_array.size(); ++i) {
        Vector2f screenPt = vertex_array[i].position;
        Color color = vertex_array[i].color;
        int x = int(screenPt.x);
        int y = int(screenPt.y);
        if (screenPt.x > 0.0f && screenPt.y > 0.0f &&
            screenPt.x < window_w && screenPt.y < window_h) {
            imageR[x + y * window_w] = abs(imageR[x + y * window_w] - color.r);
            imageG[x + y * window_w] = abs(imageG[x + y * window_w] - color.g);
            imageB[x + y * window_w] = abs(imageB[x + y * window_w] - color.b);
        }
    }
    // Frame index = offset of t from t_start in units of t_step.
    double file_name_double = (t + 3.0)/t_step;
    char filename[30];
    sprintf(filename, "./pic/%09d.png", int(file_name_double));
    write_png(filename, window_w, window_h, imageR, imageG, imageB);
    free(imageR);
    free(imageG);
    free(imageB);
}

// Kernel: for each of 1000 timesteps (grid-stride over `step`), iterate the
// chaos map 800 times starting from x = y = t and store the screen-space
// point of every iteration at cuda_vector_array[step*800 + iter].
// Iterations < 100 are pushed off-screen (FLT_MAX) as warm-up.
__global__ void compute_each_step(Vector2f* cuda_vector_array, double T) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (int step = id; step < 1000; step = step + stride) // half of steps_per_frame
    {
        double t = T + step * 1e-7;
        double x = t;
        double y = t;
        for (int iter = 0; iter < 800; ++iter) // iters
        {
            const double xx = x * x;
            const double yy = y * y;
            const double tt = t * t;
            const double xy = x * y;
            const double xt = x * t;
            const double yt = y * t;
            // Coefficients hard-wired to match RandParams().
            const double nx = xx * 1 + yy * 0 + tt * 0 + xy * 0 + xt *-1 +
                              yt * 1 + x *-1 + y * 0 + t * 0;
            const double ny = xx * 0 + yy *-1 + tt *-1 + xy *-1 + xt *-1 +
                              yt *-1 + x * 0 + y *-1 + t * 0;
            x = nx;
            y = ny;
            Vector2f screenPt;
            screenPt.x = x;
            screenPt.y = y;
            ToScreen(screenPt);
            if (iter < 100) {
                screenPt.x = FLT_MAX;
                screenPt.y = FLT_MAX;
            }
            cuda_vector_array[step*800 + iter].x = screenPt.x;
            cuda_vector_array[step*800 + iter].y = screenPt.y;
        }
    }
}

// Computing thread: sweeps t over [thread_num - 3, thread_num - 3 + each_thread_step),
// producing one frame (two kernel rounds of 1000 timesteps each) per loop and
// pushing it to this thread's I/O queues round-robin.
void* thread_target(void* arg) {
    int* start = (int*) arg;
    int thread_num = int(start[0]);
    int which_io = 0;
    int full_hits = 0;
    // Timestep setting.
    double t = double(thread_num) - 3;
    double local_t_end = t + each_thread_step;
    // Set up the vertex array once; colors are fixed per orbit index.
    V result;
    result.vertex_array.resize(iters * steps_per_frame); // 800 * 2000
    for (size_t i = 0; i < result.vertex_array.size(); ++i)
        result.vertex_array[i].color = GetRandColor(i % iters);
    const size_t half_bytes = iters * steps_per_frame/2 * sizeof(Vector2f);
    while (t < local_t_end) {
        // Busy-wait until the target I/O queue has room.
        if (vertex_array_queue[thread_num + which_io].unsafe_size() >= queue_size) {
            continue;
        } else {
            full_hits = 0;
        }
        // Each computing thread owns one GPU.
        if (thread_num == 0) cudaSetDevice(0);
        else cudaSetDevice(1);
        // Buffers for half a frame (1000 steps * 800 iters).
        Vector2f* cuda_vector_array;
        Vector2f* vector_array = (Vector2f*)malloc(half_bytes);
        HANDLE_ERROR( cudaMalloc(&cuda_vector_array, half_bytes));
        /*********************** first round ***********************/
        compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t);
        // Catch launch errors, then asynchronous execution errors.
        HANDLE_ERROR( cudaGetLastError());
        HANDLE_ERROR( cudaDeviceSynchronize());
        // FIX: copy into the buffer, not into &vector_array (the pointer
        // variable itself), which smashed the stack in the original.
        HANDLE_ERROR( cudaMemcpy(vector_array, cuda_vector_array, half_bytes,
                                 cudaMemcpyDeviceToHost));
        // Copy data back to the first half of result.vertex_array.
        for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) {
            result.vertex_array[i].position.x = vector_array[i].x;
            result.vertex_array[i].position.y = vector_array[i].y;
        }
        t += 1000 * 1e-7;
        /*********************** second round ***********************/
        compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t);
        HANDLE_ERROR( cudaGetLastError());
        HANDLE_ERROR( cudaDeviceSynchronize());
        HANDLE_ERROR( cudaMemcpy(vector_array, cuda_vector_array, half_bytes,
                                 cudaMemcpyDeviceToHost));
        // Copy data back to the second half of result.vertex_array.
        int st = result.vertex_array.size() / 2;
        for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) {
            result.vertex_array[st + i].position.x = vector_array[i].x;
            result.vertex_array[st + i].position.y = vector_array[i].y;
        }
        t += 1000 * 1e-7;
        // FIX: release the device buffer too; the original leaked it on
        // every loop iteration.
        HANDLE_ERROR( cudaFree(cuda_vector_array));
        free(vector_array);
        // Hand the finished frame to the next I/O thread.
        result.t = t;
        vertex_array_queue[thread_num + which_io].push(result);
        which_io = (which_io + 1) % computing_to_io_ratio;
    } // t end
    cout << "computing thread: " << thread_num << " finished" << endl;
    // Sentinel frame tells every attached I/O thread to exit.
    result.t = -100;
    for (int i = 0; i < computing_to_io_ratio; i++)
        vertex_array_queue[thread_num + i].push(result);
    pthread_exit(NULL);
}

// I/O thread: pops frames from its queue and writes PNGs until it sees the
// t == -100 sentinel.
void* thread_io_target(void* arg) {
    int* start = (int*) arg;
    int io_num = int(start[0]);
    int empty_hits = 0;
    while (true) {
        // Busy-wait for work.
        if (vertex_array_queue[io_num].empty()) {
            continue;
        } else {
            empty_hits = 0;
        }
        V result;
        if (!vertex_array_queue[io_num].try_pop(result))
            continue;
        // Check if the computing thread finished.
        if (result.t == -100)
            break;
        vector<Vertex> vertex_array;
        vertex_array.resize(result.vertex_array.size());
        vertex_array = result.vertex_array;
        double t = result.t;
        create_png(vertex_array, t);
    }
    cout << "io thread " << io_num << " exits" << endl;
    pthread_exit(NULL);
}

int main(int argc, char* argv[]) {
    cout << "start computing........." << endl;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    start = clock();
    rand_gen.seed((unsigned int)time(0));
    // Initialize plot state and equation coefficients.
    ResetPlot();
    RandParams(params);
    pthread_t computing_threads[num_computing_threads];
    pthread_t io_threads[num_io_threads];
    // Create computing threads.
    for (int i = 0; i < num_computing_threads; ++i)
        assert(0 == pthread_create(&computing_threads[i], NULL,
                                   thread_target, (void*) &start_points[i]));
    // Create I/O threads.
    for (int i = 0; i < num_io_threads; ++i)
        assert(0 == pthread_create(&io_threads[i], NULL,
                                   thread_io_target, (void*) &io_point[i]));
    // Join computing threads, then I/O threads.
    for (int i = 0; i < num_computing_threads; ++i)
        assert(0 == pthread_join(computing_threads[i], NULL));
    for (int i = 0; i < num_io_threads; ++i)
        assert(0 == pthread_join(io_threads[i], NULL));
    stop = clock();
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    cout << double(stop - start) / CLOCKS_PER_SEC << endl;
    cout << "total time: "
         << chrono::duration_cast<chrono::microseconds>(t2 - t1).count()
         << " us" << endl;
    return 0;
}
#include <iostream> #include <random> #include <sstream> #include <cassert> #include <fstream> #include <cfloat> #include <cstdlib> #include <iostream> #include <string> #include <ctime> #include <queue> #include <pthread.h> #include <png.h> #include "tbb/concurrent_queue.h" #include <chrono> #include <hip/hip_runtime.h> using namespace std; using namespace tbb; #define BLOCKS_PER_GRID 32 #define THREADS_PER_BLOCK 32 clock_t start, stop; //Global constants #define t_step 1e-7 #define queue_size 25 static const int iters = 800; static const int steps_per_frame = 2000; static const double delta_per_step = 1e-5; static const double delta_minimum = 1e-7; static const double t_start = -3.0; static const double t_end = 3.0; static const int fad_speed = 10; static std::mt19937 rand_gen; static const float dot_sizes[3] = { 1.0f, 3.0f, 10.0f }; static const int num_params = 18; double params[num_params]; // 18 //Global variables static int window_w = 1600; static int window_h = 900; static int window_bits = 24; static float plot_scale = 0.25f; static float plot_x = 0.0f; static float plot_y = 0.0f; // thread constants int num_computing_threads = 2; int num_io_threads = 6; // int start_points[6] = {0, 1, 2, 3, 4, 5}; // int start_points[3] = {0, 2, 4}; // start from [i] - 3; int start_points[2] = {0, 3}; int each_thread_step = 3;//int(6/num_computing_threads); // -3 ~ 3 / num_computing_therads int io_point[6] = {0, 1, 2, 3, 4, 5}; int computing_to_io_ratio = 3; // 1 computing thread maps to 3 io thread struct Color{ int r; int g; int b; }; struct Vector2f{ double x; double y; } ; struct Vertex{ Vector2f position; Color color; }; struct raw_vector2f{ double* xs; double* ys; }; struct V{ vector<Vertex> vertex_array; double t; }; // queue<V> vertex_array_queue[6]; concurrent_queue<V> vertex_array_queue[6]; inline void e(hipError_t err, const char* file, int line) { if (err != hipSuccess) { printf("Error in %s at line %d:\n\t%s\n", file, line, hipGetErrorString(err)); 
exit(EXIT_FAILURE); } } #define HANDLE_ERROR(err) ( e(err, __FILE__, __LINE__) ) static Color GetRandColor(int i) { i += 1; int r = std::min(255, 50 + (i * 11909) % 256); int g = std::min(255, 50 + (i * 52973) % 256); int b = std::min(255, 50 + (i * 44111) % 256); return Color{r, g, b}; } static void ResetPlot() { plot_scale = 0.25f; plot_x = 0.0f; plot_y = 0.0f; } __device__ void ToScreen(Vector2f& screenPt) { const float s = 0.25f * 1600.0 / 2.0; const float nx = 1600.0 * 0.5f + (float(screenPt.x) - 0.0) * s; const float ny = 900.0 * 0.5f + (float(screenPt.y) - 0.0) * s; screenPt.x = nx; screenPt.y = ny; // return Vector2f{nx, ny}; } static void RandParams(double* params) { params[ 0] = 1; params[ 1] = 0; params[ 2] = 0; params[ 3] = 0; params[ 4] =-1; params[ 5] = 1; params[ 6] =-1; params[ 7] = 0; params[ 8] = 0; params[ 9] = 0; params[10] =-1; params[11] =-1; params[12] =-1; params[13] =-1; params[14] =-1; params[15] = 0; params[16] =-1; params[17] = 0; } void write_png(const char* filename, const int width, const int height, const int* imageR, const int* imageG, const int* imageB) { FILE* fp = fopen(filename, "wb"); assert(fp); png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); assert(png_ptr); png_infop info_ptr = png_create_info_struct(png_ptr); assert(info_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_write_info(png_ptr, info_ptr); // png_set_compression_level(png_ptr, 0); size_t row_size = 3 * width * sizeof(png_byte); png_bytep row = (png_bytep)malloc(row_size); for (int y = 0; y < height; ++y) { memset(row, 0, row_size); for (int x = 0; x < width; ++x) { png_bytep color = row + x * 3; color[0] = imageR[x + y * window_w]; color[1] = imageG[x + y * window_w]; color[2] = imageB[x + y * window_w]; } png_write_row(png_ptr, row); } free(row); png_write_end(png_ptr, NULL); 
png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } void create_png(vector<Vertex>& vertex_array, double t) { // allocate memory for image size_t image_size = window_w * window_h * sizeof(int); int* imageR = (int*)malloc(image_size); int* imageG = (int*)malloc(image_size); int* imageB = (int*)malloc(image_size); memset(imageR, 0, image_size); memset(imageG, 0, image_size); memset(imageB, 0, image_size); // plot the points for (size_t i = 0; i < vertex_array.size(); ++i) { Vector2f screenPt = vertex_array[i].position; // double Color color = vertex_array[i].color; // int int x = int(screenPt.x); int y = int(screenPt.y); if (screenPt.x > 0.0f && screenPt.y > 0.0f && screenPt.x < window_w && screenPt.y < window_h) { imageR[x + y * window_w] = abs(imageR[x + y * window_w] - color.r); imageG[x + y * window_w] = abs(imageG[x + y * window_w] - color.g); imageB[x + y * window_w] = abs(imageB[x + y * window_w] - color.b); } } // start I/O double file_name_double = (t + 3.0)/t_step; // cout << "filename: " << t << " "; char filename[30]; // sprintf(filename , "./pic/%06d.png" , int(file_name_double)); sprintf(filename, "./pic/%09d.png", int(file_name_double)); //cout << filename << endl; write_png(filename, window_w, window_h, imageR, imageG, imageB); free(imageR); free(imageG); free(imageB); } __global__ void compute_each_step(Vector2f* cuda_vector_array, double T) { // index int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int step = id ; step < 1000; step = step + stride) //steps = 2000 { double t = T + step * 1e-7; // bool isOffScreen = true; double x = t; double y = t; for (int iter = 0; iter < 800; ++iter) // 800 { const double xx = x * x; const double yy = y * y; const double tt = t * t; const double xy = x * y; const double xt = x * t; const double yt = y * t; const double nx = xx * 1 + yy * 0 + tt * 0 + xy * 0 + xt *-1 + yt * 1 + x *-1 + y * 0 + t * 0 ; const double ny = xx * 0 + yy *-1 + tt *-1 + xy *-1 + xt *-1 + 
yt *-1 + x * 0 + y *-1 + t * 0 ; x = nx; y = ny; Vector2f screenPt; screenPt.x = x; screenPt.y = y; ToScreen(screenPt); if (iter < 100) { screenPt.x = FLT_MAX; screenPt.y = FLT_MAX; } cuda_vector_array[step*800 + iter].x = screenPt.x; cuda_vector_array[step*800 + iter].y = screenPt.y; } //iteration end } // step end } void* thread_target(void* arg) { int* start = (int*) arg; int thread_num = int(start[0]); int which_io = 0; int full_hits = 0; // timestep setting double t = double(thread_num) - 3; double local_t_end = t + each_thread_step; // Setup the vertex array V result; result.vertex_array.resize(iters * steps_per_frame); // 800 * 2000 for (size_t i = 0; i < result.vertex_array.size(); ++i) result.vertex_array[i].color = GetRandColor(i % iters); while(t < local_t_end) { // wait for i/o if (vertex_array_queue[thread_num + which_io].unsafe_size() >= queue_size) { // cout << "full hits: " << ++full_hits << " ,which io thread: " << thread_num + which_io << endl; continue; }else { full_hits = 0; } // set GPU id if (thread_num == 0) hipSetDevice(0); else hipSetDevice(1); // GPU memory Vector2f* cuda_vector_array; Vector2f* vector_array = (Vector2f*)malloc(iters * steps_per_frame/2 * sizeof(Vector2f)); HANDLE_ERROR( hipMalloc(&cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f))); /*********************** first round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( hipGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( hipDeviceSynchronize()); HANDLE_ERROR( hipMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), hipMemcpyDeviceToHost)); // copy data back to result.vertex_array for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[i].position.x = vector_array[i].x; result.vertex_array[i].position.y = vector_array[i].y; } t += 1000 * 1e-7; 
/*********************** secodn round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( hipGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( hipDeviceSynchronize()); HANDLE_ERROR( hipMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), hipMemcpyDeviceToHost)); // copy data back to result.vertex_array int st = result.vertex_array.size() / 2; for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[st + i].position.x = vector_array[i].x; result.vertex_array[st + i].position.y = vector_array[i].y; } t += 1000 * 1e-7; free(vector_array); // Draw the data // put the vertex array to queue result.t = t; vertex_array_queue[thread_num + which_io].push(result); which_io = (which_io + 1) % computing_to_io_ratio; } // t end cout << "computing thread: " << thread_num << " finished" << endl; result.t = -100; for (int i = 0; i < computing_to_io_ratio; i++) vertex_array_queue[thread_num + i].push(result); pthread_exit(NULL); } void* thread_io_target(void* arg) { int* start = (int*) arg; int io_num = int(start[0]); int empty_hits = 0; // cout << "io thread: " << io_num << " start working" << endl; while (true) { if (vertex_array_queue[io_num].empty()) { // cout << "empty hits: " << ++empty_hits <<", which io: " << io_num << endl; continue; }else { empty_hits = 0; } // take out the first result // V result = vertex_array_queue[io_num].front(); // vertex_array_queue[io_num].pop(); V result; if (!vertex_array_queue[io_num].try_pop(result)) continue; // check if the computing thread finished if (result.t == -100) break; vector<Vertex> vertex_array; vertex_array.resize(result.vertex_array.size()); vertex_array = result.vertex_array; double t = result.t; create_png(vertex_array, t); } cout << "io thread " << io_num << " exits" << endl; pthread_exit(NULL); } int main(int argc, char* 
argv[]) { cout << "start computing........." << endl; chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); start = clock(); rand_gen.seed((unsigned int)time(0)); // Initialize random parameters ResetPlot(); RandParams(params); pthread_t computing_threads[num_computing_threads]; pthread_t io_threads[num_io_threads]; // create computing threads for (int i = 0; i < num_computing_threads; ++i) assert (0 == pthread_create(&computing_threads[i], NULL, thread_target, (void*) &start_points[i])); // create i/o threads for (int i = 0; i < num_io_threads; ++i) assert (0 == pthread_create(&io_threads[i], NULL, thread_io_target, (void*) &io_point[i])); // join computing threads for (int i = 0; i < num_computing_threads; ++i) assert(0 == pthread_join(computing_threads[i], NULL)); // join computing threads for (int i = 0; i < num_io_threads; ++i) assert(0 == pthread_join(io_threads[i], NULL)); stop = clock(); chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); cout << double(stop - start) / CLOCKS_PER_SEC << endl; cout <<"total time: " << chrono::duration_cast<chrono::microseconds>(t2 - t1).count() << " us" << endl; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <iostream> #include <random> #include <sstream> #include <cassert> #include <fstream> #include <cfloat> #include <cstdlib> #include <iostream> #include <string> #include <ctime> #include <queue> #include <pthread.h> #include <png.h> #include "tbb/concurrent_queue.h" #include <chrono> #include <hip/hip_runtime.h> using namespace std; using namespace tbb; #define BLOCKS_PER_GRID 32 #define THREADS_PER_BLOCK 32 clock_t start, stop; //Global constants #define t_step 1e-7 #define queue_size 25 static const int iters = 800; static const int steps_per_frame = 2000; static const double delta_per_step = 1e-5; static const double delta_minimum = 1e-7; static const double t_start = -3.0; static const double t_end = 3.0; static const int fad_speed = 10; static std::mt19937 rand_gen; static const float dot_sizes[3] = { 1.0f, 3.0f, 10.0f }; static const int num_params = 18; double params[num_params]; // 18 //Global variables static int window_w = 1600; static int window_h = 900; static int window_bits = 24; static float plot_scale = 0.25f; static float plot_x = 0.0f; static float plot_y = 0.0f; // thread constants int num_computing_threads = 2; int num_io_threads = 6; // int start_points[6] = {0, 1, 2, 3, 4, 5}; // int start_points[3] = {0, 2, 4}; // start from [i] - 3; int start_points[2] = {0, 3}; int each_thread_step = 3;//int(6/num_computing_threads); // -3 ~ 3 / num_computing_therads int io_point[6] = {0, 1, 2, 3, 4, 5}; int computing_to_io_ratio = 3; // 1 computing thread maps to 3 io thread struct Color{ int r; int g; int b; }; struct Vector2f{ double x; double y; } ; struct Vertex{ Vector2f position; Color color; }; struct raw_vector2f{ double* xs; double* ys; }; struct V{ vector<Vertex> vertex_array; double t; }; // queue<V> vertex_array_queue[6]; concurrent_queue<V> vertex_array_queue[6]; inline void e(hipError_t err, const char* file, int line) { if (err != hipSuccess) { printf("Error in %s at line %d:\n\t%s\n", file, line, hipGetErrorString(err)); 
exit(EXIT_FAILURE); } } #define HANDLE_ERROR(err) ( e(err, __FILE__, __LINE__) ) static Color GetRandColor(int i) { i += 1; int r = std::min(255, 50 + (i * 11909) % 256); int g = std::min(255, 50 + (i * 52973) % 256); int b = std::min(255, 50 + (i * 44111) % 256); return Color{r, g, b}; } static void ResetPlot() { plot_scale = 0.25f; plot_x = 0.0f; plot_y = 0.0f; } __device__ void ToScreen(Vector2f& screenPt) { const float s = 0.25f * 1600.0 / 2.0; const float nx = 1600.0 * 0.5f + (float(screenPt.x) - 0.0) * s; const float ny = 900.0 * 0.5f + (float(screenPt.y) - 0.0) * s; screenPt.x = nx; screenPt.y = ny; // return Vector2f{nx, ny}; } static void RandParams(double* params) { params[ 0] = 1; params[ 1] = 0; params[ 2] = 0; params[ 3] = 0; params[ 4] =-1; params[ 5] = 1; params[ 6] =-1; params[ 7] = 0; params[ 8] = 0; params[ 9] = 0; params[10] =-1; params[11] =-1; params[12] =-1; params[13] =-1; params[14] =-1; params[15] = 0; params[16] =-1; params[17] = 0; } void write_png(const char* filename, const int width, const int height, const int* imageR, const int* imageG, const int* imageB) { FILE* fp = fopen(filename, "wb"); assert(fp); png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); assert(png_ptr); png_infop info_ptr = png_create_info_struct(png_ptr); assert(info_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_write_info(png_ptr, info_ptr); // png_set_compression_level(png_ptr, 0); size_t row_size = 3 * width * sizeof(png_byte); png_bytep row = (png_bytep)malloc(row_size); for (int y = 0; y < height; ++y) { memset(row, 0, row_size); for (int x = 0; x < width; ++x) { png_bytep color = row + x * 3; color[0] = imageR[x + y * window_w]; color[1] = imageG[x + y * window_w]; color[2] = imageB[x + y * window_w]; } png_write_row(png_ptr, row); } free(row); png_write_end(png_ptr, NULL); 
png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } void create_png(vector<Vertex>& vertex_array, double t) { // allocate memory for image size_t image_size = window_w * window_h * sizeof(int); int* imageR = (int*)malloc(image_size); int* imageG = (int*)malloc(image_size); int* imageB = (int*)malloc(image_size); memset(imageR, 0, image_size); memset(imageG, 0, image_size); memset(imageB, 0, image_size); // plot the points for (size_t i = 0; i < vertex_array.size(); ++i) { Vector2f screenPt = vertex_array[i].position; // double Color color = vertex_array[i].color; // int int x = int(screenPt.x); int y = int(screenPt.y); if (screenPt.x > 0.0f && screenPt.y > 0.0f && screenPt.x < window_w && screenPt.y < window_h) { imageR[x + y * window_w] = abs(imageR[x + y * window_w] - color.r); imageG[x + y * window_w] = abs(imageG[x + y * window_w] - color.g); imageB[x + y * window_w] = abs(imageB[x + y * window_w] - color.b); } } // start I/O double file_name_double = (t + 3.0)/t_step; // cout << "filename: " << t << " "; char filename[30]; // sprintf(filename , "./pic/%06d.png" , int(file_name_double)); sprintf(filename, "./pic/%09d.png", int(file_name_double)); //cout << filename << endl; write_png(filename, window_w, window_h, imageR, imageG, imageB); free(imageR); free(imageG); free(imageB); } __global__ void compute_each_step(Vector2f* cuda_vector_array, double T) { // index int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int step = id ; step < 1000; step = step + stride) //steps = 2000 { double t = T + step * 1e-7; // bool isOffScreen = true; double x = t; double y = t; for (int iter = 0; iter < 800; ++iter) // 800 { const double xx = x * x; const double yy = y * y; const double tt = t * t; const double xy = x * y; const double xt = x * t; const double yt = y * t; const double nx = xx * 1 + yy * 0 + tt * 0 + xy * 0 + xt *-1 + yt * 1 + x *-1 + y * 0 + t * 0 ; const double ny = xx * 0 + yy *-1 + tt *-1 + xy *-1 + xt *-1 + 
yt *-1 + x * 0 + y *-1 + t * 0 ; x = nx; y = ny; Vector2f screenPt; screenPt.x = x; screenPt.y = y; ToScreen(screenPt); if (iter < 100) { screenPt.x = FLT_MAX; screenPt.y = FLT_MAX; } cuda_vector_array[step*800 + iter].x = screenPt.x; cuda_vector_array[step*800 + iter].y = screenPt.y; } //iteration end } // step end } void* thread_target(void* arg) { int* start = (int*) arg; int thread_num = int(start[0]); int which_io = 0; int full_hits = 0; // timestep setting double t = double(thread_num) - 3; double local_t_end = t + each_thread_step; // Setup the vertex array V result; result.vertex_array.resize(iters * steps_per_frame); // 800 * 2000 for (size_t i = 0; i < result.vertex_array.size(); ++i) result.vertex_array[i].color = GetRandColor(i % iters); while(t < local_t_end) { // wait for i/o if (vertex_array_queue[thread_num + which_io].unsafe_size() >= queue_size) { // cout << "full hits: " << ++full_hits << " ,which io thread: " << thread_num + which_io << endl; continue; }else { full_hits = 0; } // set GPU id if (thread_num == 0) hipSetDevice(0); else hipSetDevice(1); // GPU memory Vector2f* cuda_vector_array; Vector2f* vector_array = (Vector2f*)malloc(iters * steps_per_frame/2 * sizeof(Vector2f)); HANDLE_ERROR( hipMalloc(&cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f))); /*********************** first round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( hipGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( hipDeviceSynchronize()); HANDLE_ERROR( hipMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), hipMemcpyDeviceToHost)); // copy data back to result.vertex_array for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[i].position.x = vector_array[i].x; result.vertex_array[i].position.y = vector_array[i].y; } t += 1000 * 1e-7; 
/*********************** secodn round ***********************/ // invoke kernel compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t); // cathc error from kernel synchronize HANDLE_ERROR( hipGetLastError()); // catch error from kernel asynchronize HANDLE_ERROR( hipDeviceSynchronize()); HANDLE_ERROR( hipMemcpy(&vector_array, cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f), hipMemcpyDeviceToHost)); // copy data back to result.vertex_array int st = result.vertex_array.size() / 2; for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) { result.vertex_array[st + i].position.x = vector_array[i].x; result.vertex_array[st + i].position.y = vector_array[i].y; } t += 1000 * 1e-7; free(vector_array); // Draw the data // put the vertex array to queue result.t = t; vertex_array_queue[thread_num + which_io].push(result); which_io = (which_io + 1) % computing_to_io_ratio; } // t end cout << "computing thread: " << thread_num << " finished" << endl; result.t = -100; for (int i = 0; i < computing_to_io_ratio; i++) vertex_array_queue[thread_num + i].push(result); pthread_exit(NULL); } void* thread_io_target(void* arg) { int* start = (int*) arg; int io_num = int(start[0]); int empty_hits = 0; // cout << "io thread: " << io_num << " start working" << endl; while (true) { if (vertex_array_queue[io_num].empty()) { // cout << "empty hits: " << ++empty_hits <<", which io: " << io_num << endl; continue; }else { empty_hits = 0; } // take out the first result // V result = vertex_array_queue[io_num].front(); // vertex_array_queue[io_num].pop(); V result; if (!vertex_array_queue[io_num].try_pop(result)) continue; // check if the computing thread finished if (result.t == -100) break; vector<Vertex> vertex_array; vertex_array.resize(result.vertex_array.size()); vertex_array = result.vertex_array; double t = result.t; create_png(vertex_array, t); } cout << "io thread " << io_num << " exits" << endl; pthread_exit(NULL); } int main(int argc, char* 
argv[]) { cout << "start computing........." << endl; chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); start = clock(); rand_gen.seed((unsigned int)time(0)); // Initialize random parameters ResetPlot(); RandParams(params); pthread_t computing_threads[num_computing_threads]; pthread_t io_threads[num_io_threads]; // create computing threads for (int i = 0; i < num_computing_threads; ++i) assert (0 == pthread_create(&computing_threads[i], NULL, thread_target, (void*) &start_points[i])); // create i/o threads for (int i = 0; i < num_io_threads; ++i) assert (0 == pthread_create(&io_threads[i], NULL, thread_io_target, (void*) &io_point[i])); // join computing threads for (int i = 0; i < num_computing_threads; ++i) assert(0 == pthread_join(computing_threads[i], NULL)); // join computing threads for (int i = 0; i < num_io_threads; ++i) assert(0 == pthread_join(io_threads[i], NULL)); stop = clock(); chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); cout << double(stop - start) / CLOCKS_PER_SEC << endl; cout <<"total time: " << chrono::duration_cast<chrono::microseconds>(t2 - t1).count() << " us" << endl; return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17compute_each_stepP8Vector2fd .globl _Z17compute_each_stepP8Vector2fd .p2align 8 .type _Z17compute_each_stepP8Vector2fd,@function _Z17compute_each_stepP8Vector2fd: s_load_b32 s4, s[0:1], 0x1c s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0x3e8, v1 s_cbranch_execz .LBB0_5 s_load_b32 s11, s[2:3], 0x0 s_load_b128 s[0:3], s[0:1], 0x0 s_mul_i32 s6, s15, s8 s_mov_b32 s4, 0 s_mul_i32 s9, s6, 0x320 s_mov_b32 s6, 0 v_mad_u32_u24 v0, v0, 0x320, s9 s_mov_b32 s10, 0 s_mov_b32 s5, 0x40690000 s_mov_b32 s7, 0x407c2000 s_mov_b32 s9, 0x3e7ad7f2 s_waitcnt lgkmcnt(0) s_mul_i32 s11, s11, s8 s_mov_b32 s8, 0x9abcaf48 s_mul_i32 s12, s11, 0x320 .LBB0_2: v_cvt_f64_i32_e32 v[2:3], v1 s_mov_b32 s13, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[2:3], v[2:3], s[8:9], s[2:3] v_mul_f64 v[4:5], v[2:3], v[2:3] v_mul_f64 v[8:9], v[2:3], 0 v_dual_mov_b32 v13, v3 :: v_dual_mov_b32 v12, v2 v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2 s_delay_alu instid0(VALU_DEP_4) v_mul_f64 v[6:7], v[4:5], 0 .LBB0_3: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[14:15], v[12:13], v[12:13] v_mul_f64 v[16:17], v[10:11], v[10:11] s_cmpk_lt_u32 s13, 0x64 s_cselect_b32 s14, -1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_fma_f64 v[18:19], v[14:15], 0, -v[16:17] v_fma_f64 v[14:15], v[16:17], 0, v[14:15] v_mul_f64 v[16:17], v[10:11], v[12:13] v_add_f64 v[18:19], v[18:19], -v[4:5] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[14:15], v[6:7], v[14:15] v_fma_f64 v[18:19], -v[10:11], v[12:13], v[18:19] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], v[16:17], 0, v[14:15] v_fma_f64 v[16:17], v[2:3], -v[12:13], v[18:19] 
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], -v[2:3], v[12:13], v[14:15] v_fma_f64 v[16:17], -v[2:3], v[10:11], v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], v[2:3], v[10:11], v[14:15] v_fma_f64 v[16:17], v[12:13], 0, v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[14:15], -v[12:13] v_add_f64 v[14:15], v[16:17], -v[10:11] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[12:13], v[10:11], 0, v[12:13] v_add_f64 v[10:11], v[8:9], v[14:15] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[8:9], v[12:13] v_cvt_f32_f64_e32 v14, v[10:11] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f32_f64_e32 v16, v[12:13] v_cvt_f64_f32_e32 v[14:15], v14 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f64_f32_e32 v[16:17], v16 v_fma_f64 v[14:15], v[14:15], 0x40690000, s[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[16:17], v[16:17], s[4:5], 0x40890000 v_cvt_f32_f64_e32 v14, v[14:15] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f32_f64_e32 v16, v[16:17] v_cvt_f64_f32_e32 v[14:15], v14 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cvt_f64_f32_e32 v[18:19], v16 v_add_nc_u32_e32 v16, s13, v0 s_add_i32 s13, s13, 1 s_cmpk_eq_i32 s13, 0x320 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v17, 31, v16 v_lshlrev_b64 v[16:17], 4, v[16:17] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v20, vcc_lo, s0, v16 v_add_co_ci_u32_e32 v21, vcc_lo, s1, v17, vcc_lo v_cndmask_b32_e64 v17, v15, 0x47efffff, s14 v_cndmask_b32_e64 v16, v14, 0xe0000000, s14 v_cndmask_b32_e64 v15, v19, 0x47efffff, s14 v_cndmask_b32_e64 v14, v18, 0xe0000000, s14 
global_store_b128 v[20:21], v[14:17], off s_cbranch_scc0 .LBB0_3 v_add_nc_u32_e32 v1, s11, v1 v_add_nc_u32_e32 v0, s12, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v1 s_or_b32 s10, vcc_lo, s10 s_and_not1_b32 exec_lo, exec_lo, s10 s_cbranch_execnz .LBB0_2 .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17compute_each_stepP8Vector2fd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 22 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17compute_each_stepP8Vector2fd, .Lfunc_end0-_Z17compute_each_stepP8Vector2fd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident 
"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 8 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17compute_each_stepP8Vector2fd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17compute_each_stepP8Vector2fd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 22 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17compute_each_stepP8Vector2fd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GT.AND P0, PT, R0, 0x3e7, PT ; /* 0x000003e70000780c */ /* 0x000fda0003f04270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ UIADD3 UR4, UP0, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fc8000ff1e03f */ /*0080*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */ /* 0x000fe400087fe43f */ /*0090*/ IMAD.U32 R4, RZ, RZ, UR4 ; /* 0x00000004ff047e24 */ /* 0x000fc8000f8e00ff */ /*00a0*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */ /* 0x000fe2000f8e00ff */ /*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00c0*/ I2F.F64 R6, R0 ; /* 0x0000000000067312 */ /* 0x000e220000201c00 */ /*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, -0x654350b8 ; /* 0x9abcaf48ff027424 */ /* 0x000fe400078e00ff */ /*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x3e7ad7f2 ; /* 0x3e7ad7f2ff037424 */ /* 0x000fe400078e00ff */ /*00f0*/ IMAD R11, R0, 0x320, RZ ; /* 0x00000320000b7824 */ /* 0x000fe400078e02ff */ /*0100*/ IMAD.MOV.U32 R26, RZ, RZ, RZ ; /* 0x000000ffff1a7224 */ /* 0x000fc600078e00ff */ /*0110*/ IADD3 R9, R11.reuse, 0x1, RZ ; /* 0x000000010b097810 */ /* 0x040fe20007ffe0ff */ /*0120*/ IMAD.WIDE R10, R11, 0x10, R4 ; /* 0x000000100b0a7825 */ /* 0x000fe200078e0204 */ /*0130*/ DFMA R6, R6, R2, c[0x0][0x168] ; /* 0x00005a000606762b */ /* 0x0030460000000002 */ /*0140*/ IMAD.WIDE R2, R9, 0x10, R4 ; /* 0x0000001009027825 
*/ /* 0x001fc600078e0204 */ /*0150*/ DMUL R8, R6, R6 ; /* 0x0000000606087228 */ /* 0x0020480000000000 */ /*0160*/ IMAD.MOV.U32 R18, RZ, RZ, R6.reuse ; /* 0x000000ffff127224 */ /* 0x100fe400078e0006 */ /*0170*/ IMAD.MOV.U32 R19, RZ, RZ, R7.reuse ; /* 0x000000ffff137224 */ /* 0x100fe400078e0007 */ /*0180*/ IMAD.MOV.U32 R16, RZ, RZ, R6 ; /* 0x000000ffff107224 */ /* 0x000fe400078e0006 */ /*0190*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */ /* 0x003fe400078e0007 */ /*01a0*/ DMUL R22, R18, R18 ; /* 0x0000001212167228 */ /* 0x000fe20000000000 */ /*01b0*/ ISETP.GE.U32.AND P0, PT, R26, 0x64, PT ; /* 0x000000641a00780c */ /* 0x000fc60003f06070 */ /*01c0*/ DMUL R12, R16, R16 ; /* 0x00000010100c7228 */ /* 0x002e080000000000 */ /*01d0*/ DMUL R14, R16, R18 ; /* 0x00000012100e7228 */ /* 0x000fc80000000000 */ /*01e0*/ DFMA R20, RZ, R12, -R22 ; /* 0x0000000cff14722b */ /* 0x001e080000000816 */ /*01f0*/ DFMA R12, RZ, R22, R12 ; /* 0x00000016ff0c722b */ /* 0x000e48000000000c */ /*0200*/ DADD R20, -R8, R20 ; /* 0x0000000008147229 */ /* 0x001e080000000114 */ /*0210*/ DFMA R12, RZ, R8, R12 ; /* 0x00000008ff0c722b */ /* 0x002e48000000000c */ /*0220*/ DADD R20, -R14, R20 ; /* 0x000000000e147229 */ /* 0x001e080000000114 */ /*0230*/ DFMA R12, RZ, R14, R12 ; /* 0x0000000eff0c722b */ /* 0x002e48000000000c */ /*0240*/ DFMA R20, -R6, R16, R20 ; /* 0x000000100614722b */ /* 0x001e080000000114 */ /*0250*/ DFMA R12, -R6, R16, R12 ; /* 0x00000010060c722b */ /* 0x002e48000000010c */ /*0260*/ DFMA R20, -R6, R18, R20 ; /* 0x000000120614722b */ /* 0x001e080000000114 */ /*0270*/ DFMA R12, R6, R18, R12 ; /* 0x00000012060c722b */ /* 0x002e48000000000c */ /*0280*/ DFMA R20, RZ, R16, R20 ; /* 0x00000010ff14722b */ /* 0x001e080000000014 */ /*0290*/ DADD R12, R12, -R16 ; /* 0x000000000c0c7229 */ /* 0x002e480000000810 */ /*02a0*/ DADD R20, R20, -R18 ; /* 0x0000000014147229 */ /* 0x001e080000000812 */ /*02b0*/ DFMA R12, RZ, R18, R12 ; /* 0x00000012ff0c722b */ /* 0x002e48000000000c */ /*02c0*/ DFMA 
R16, RZ, R6, R20 ; /* 0x00000006ff10722b */ /* 0x001e080000000014 */ /*02d0*/ DFMA R18, RZ, R6, R12 ; /* 0x00000006ff12722b */ /* 0x0022a4000000000c */ /*02e0*/ F2F.F32.F64 R20, R16 ; /* 0x0000001000147310 */ /* 0x001e220000301000 */ /*02f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */ /* 0x002fe400078e00ff */ /*0300*/ IMAD.MOV.U32 R13, RZ, RZ, 0x40690000 ; /* 0x40690000ff0d7424 */ /* 0x000fca00078e00ff */ /*0310*/ F2F.F32.F64 R27, R18 ; /* 0x00000012001b7310 */ /* 0x004e700000301000 */ /*0320*/ F2F.F64.F32 R20, R20 ; /* 0x0000001400147310 */ /* 0x001e300000201800 */ /*0330*/ F2F.F64.F32 R14, R27 ; /* 0x0000001b000e7310 */ /* 0x002e620000201800 */ /*0340*/ DFMA R24, R20, R12, 450 ; /* 0x407c20001418742b */ /* 0x001e0e000000000c */ /*0350*/ F2F.F32.F64 R21, R24 ; /* 0x0000001800157310 */ /* 0x0010a20000301000 */ /*0360*/ DFMA R22, R14, R12, 800 ; /* 0x408900000e16742b */ /* 0x002e48000000000c */ /*0370*/ DMUL R24, R16, R16 ; /* 0x0000001010187228 */ /* 0x001fcc0000000000 */ /*0380*/ F2F.F32.F64 R22, R22 ; /* 0x0000001600167310 */ /* 0x002e300000301000 */ /*0390*/ F2F.F64.F32 R20, R21 ; /* 0x0000001500147310 */ /* 0x004e700000201800 */ /*03a0*/ F2F.F64.F32 R14, R22 ; /* 0x00000016000e7310 */ /* 0x001e220000201800 */ /*03b0*/ FSEL R28, R20, -3.68934881474191032320e+19, P0 ; /* 0xe0000000141c7808 */ /* 0x002fc40000000000 */ /*03c0*/ FSEL R29, R21, 122879.9921875, P0 ; /* 0x47efffff151d7808 */ /* 0x000fe20000000000 */ /*03d0*/ DMUL R20, R18, R18 ; /* 0x0000001212147228 */ /* 0x000e480000000000 */ /*03e0*/ STG.E.64 [R10.64], R28 ; /* 0x0000001c0a007986 */ /* 0x000fe2000c101b04 */ /*03f0*/ FSEL R14, R14, -3.68934881474191032320e+19, P0 ; /* 0xe00000000e0e7808 */ /* 0x001fe20000000000 */ /*0400*/ DFMA R22, RZ, R24, R20 ; /* 0x00000018ff16722b */ /* 0x002fe20000000014 */ /*0410*/ FSEL R15, R15, 122879.9921875, P0 ; /* 0x47efffff0f0f7808 */ /* 0x000fc60000000000 */ /*0420*/ DFMA R24, RZ, R20, -R24 ; /* 0x00000014ff18722b */ /* 0x000e240000000818 */ /*0430*/ 
STG.E.64 [R10.64+-0x8], R14 ; /* 0xfffff80e0a007986 */ /* 0x0003e4000c101b04 */ /*0440*/ DMUL R20, R18, R16 ; /* 0x0000001012147228 */ /* 0x000fc80000000000 */ /*0450*/ DADD R24, -R8, R24 ; /* 0x0000000008187229 */ /* 0x001e080000000118 */ /*0460*/ DFMA R22, RZ, R8, R22 ; /* 0x00000008ff16722b */ /* 0x000e880000000016 */ /*0470*/ DADD R24, -R20, R24 ; /* 0x0000000014187229 */ /* 0x001e220000000118 */ /*0480*/ IMAD.MOV.U32 R14, RZ, RZ, R2 ; /* 0x000000ffff0e7224 */ /* 0x002fe200078e0002 */ /*0490*/ IADD3 R10, P1, R10, 0x40, RZ ; /* 0x000000400a0a7810 */ /* 0x000fe20007f3e0ff */ /*04a0*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */ /* 0x000fe200078e0003 */ /*04b0*/ DFMA R22, RZ, R20, R22 ; /* 0x00000014ff16722b */ /* 0x004e460000000016 */ /*04c0*/ IMAD.X R11, RZ, RZ, R11, P1 ; /* 0x000000ffff0b7224 */ /* 0x000fe200008e060b */ /*04d0*/ DFMA R20, -R6, R18, R24 ; /* 0x000000120614722b */ /* 0x001e080000000118 */ /*04e0*/ DFMA R22, -R6, R18, R22 ; /* 0x000000120616722b */ /* 0x002e480000000116 */ /*04f0*/ DFMA R20, -R6, R16, R20 ; /* 0x000000100614722b */ /* 0x001e080000000114 */ /*0500*/ DFMA R22, R6, R16, R22 ; /* 0x000000100616722b */ /* 0x002e480000000016 */ /*0510*/ DFMA R20, RZ, R18, R20 ; /* 0x00000012ff14722b */ /* 0x001e080000000014 */ /*0520*/ DADD R22, -R18, R22 ; /* 0x0000000012167229 */ /* 0x002e480000000116 */ /*0530*/ DADD R20, -R16, R20 ; /* 0x0000000010147229 */ /* 0x001e080000000114 */ /*0540*/ DFMA R22, RZ, R16, R22 ; /* 0x00000010ff16722b */ /* 0x002e480000000016 */ /*0550*/ DFMA R18, RZ, R6, R20 ; /* 0x00000006ff12722b */ /* 0x001e080000000014 */ /*0560*/ DFMA R22, RZ, R6, R22 ; /* 0x00000006ff16722b */ /* 0x002e640000000016 */ /*0570*/ F2F.F32.F64 R25, R18 ; /* 0x0000001200197310 */ /* 0x001e300000301000 */ /*0580*/ F2F.F32.F64 R24, R22 ; /* 0x0000001600187310 */ /* 0x002e700000301000 */ /*0590*/ F2F.F64.F32 R20, R25 ; /* 0x0000001900147310 */ /* 0x001e300000201800 */ /*05a0*/ F2F.F64.F32 R16, R24 ; /* 0x0000001800107310 */ /* 
0x0022a20000201800 */ /*05b0*/ DFMA R20, R20, R12, 450 ; /* 0x407c20001414742b */ /* 0x001e08000000000c */ /*05c0*/ DMUL R24, R22, R22 ; /* 0x0000001616187228 */ /* 0x002fc80000000000 */ /*05d0*/ DFMA R28, R16, R12, 800 ; /* 0x40890000101c742b */ /* 0x004e64000000000c */ /*05e0*/ F2F.F32.F64 R20, R20 ; /* 0x0000001400147310 */ /* 0x0010b00000301000 */ /*05f0*/ F2F.F32.F64 R27, R28 ; /* 0x0000001c001b7310 */ /* 0x0022e20000301000 */ /*0600*/ IADD3 R21, R26, 0x1, RZ ; /* 0x000000011a157810 */ /* 0x001fc80007ffe0ff */ /*0610*/ ISETP.GE.U32.AND P0, PT, R21, 0x64, PT ; /* 0x000000641500780c */ /* 0x000fc60003f06070 */ /*0620*/ F2F.F64.F32 R16, R20 ; /* 0x0000001400107310 */ /* 0x004e220000201800 */ /*0630*/ DMUL R28, R18, R18 ; /* 0x00000012121c7228 */ /* 0x002e4e0000000000 */ /*0640*/ F2F.F64.F32 R20, R27 ; /* 0x0000001b00147310 */ /* 0x008ea20000201800 */ /*0650*/ FSEL R16, R16, -3.68934881474191032320e+19, P0 ; /* 0xe000000010107808 */ /* 0x001fe40000000000 */ /*0660*/ FSEL R17, R17, 122879.9921875, P0 ; /* 0x47efffff11117808 */ /* 0x000fca0000000000 */ /*0670*/ STG.E.64 [R14.64], R16 ; /* 0x000000100e007986 */ /* 0x0001e2000c101b04 */ /*0680*/ FSEL R2, R20, -3.68934881474191032320e+19, P0 ; /* 0xe000000014027808 */ /* 0x004fe40000000000 */ /*0690*/ FSEL R3, R21, 122879.9921875, P0 ; /* 0x47efffff15037808 */ /* 0x000fe20000000000 */ /*06a0*/ DFMA R20, RZ, R28, R24 ; /* 0x0000001cff14722b */ /* 0x002e480000000018 */ /*06b0*/ DFMA R28, RZ, R24, -R28 ; /* 0x00000018ff1c722b */ /* 0x000ea2000000081c */ /*06c0*/ STG.E.64 [R14.64+-0x8], R2 ; /* 0xfffff8020e007986 */ /* 0x000fe6000c101b04 */ /*06d0*/ DFMA R24, RZ, R8, R20 ; /* 0x00000008ff18722b */ /* 0x002fc80000000014 */ /*06e0*/ DADD R20, -R8, R28 ; /* 0x0000000008147229 */ /* 0x004fc8000000011c */ /*06f0*/ DMUL R28, R22, R18 ; /* 0x00000012161c7228 */ /* 0x000e4c0000000000 */ /*0700*/ DADD R20, -R28, R20 ; /* 0x000000001c147229 */ /* 0x002e480000000114 */ /*0710*/ DFMA R24, RZ, R28, R24 ; /* 0x0000001cff18722b */ /* 
0x000e880000000018 */ /*0720*/ DFMA R20, -R6, R22, R20 ; /* 0x000000160614722b */ /* 0x002e480000000114 */ /*0730*/ DFMA R24, -R6, R22, R24 ; /* 0x000000160618722b */ /* 0x004e880000000118 */ /*0740*/ DFMA R20, -R6, R18, R20 ; /* 0x000000120614722b */ /* 0x002e480000000114 */ /*0750*/ DFMA R24, R6, R18, R24 ; /* 0x000000120618722b */ /* 0x004e880000000018 */ /*0760*/ DFMA R20, RZ, R22, R20 ; /* 0x00000016ff14722b */ /* 0x002e080000000014 */ /*0770*/ DADD R24, -R22, R24 ; /* 0x0000000016187229 */ /* 0x004e480000000118 */ /*0780*/ DADD R16, -R18, R20 ; /* 0x0000000012107229 */ /* 0x001e080000000114 */ /*0790*/ DFMA R18, RZ, R18, R24 ; /* 0x00000012ff12722b */ /* 0x002e480000000018 */ /*07a0*/ DFMA R16, RZ, R6, R16 ; /* 0x00000006ff10722b */ /* 0x001e080000000010 */ /*07b0*/ DFMA R18, RZ, R6, R18 ; /* 0x00000006ff12722b */ /* 0x002e640000000012 */ /*07c0*/ F2F.F32.F64 R22, R16 ; /* 0x0000001000167310 */ /* 0x001e300000301000 */ /*07d0*/ F2F.F32.F64 R27, R18 ; /* 0x00000012001b7310 */ /* 0x002e700000301000 */ /*07e0*/ F2F.F64.F32 R22, R22 ; /* 0x0000001600167310 */ /* 0x001e300000201800 */ /*07f0*/ F2F.F64.F32 R20, R27 ; /* 0x0000001b00147310 */ /* 0x002e620000201800 */ /*0800*/ DFMA R24, R22, R12, 450 ; /* 0x407c20001618742b */ /* 0x001e0e000000000c */ /*0810*/ F2F.F32.F64 R23, R24 ; /* 0x0000001800177310 */ /* 0x001e220000301000 */ /*0820*/ DFMA R28, R20, R12, 800 ; /* 0x40890000141c742b */ /* 0x0022a4000000000c */ /*0830*/ IADD3 R20, R26, 0x2, RZ ; /* 0x000000021a147810 */ /* 0x002fc80007ffe0ff */ /*0840*/ ISETP.GE.U32.AND P0, PT, R20, 0x64, PT ; /* 0x000000641400780c */ /* 0x000fc80003f06070 */ /*0850*/ F2F.F32.F64 R28, R28 ; /* 0x0000001c001c7310 */ /* 0x004e700000301000 */ /*0860*/ F2F.F64.F32 R22, R23 ; /* 0x0000001700167310 */ /* 0x001e300000201800 */ /*0870*/ F2F.F64.F32 R20, R28 ; /* 0x0000001c00147310 */ /* 0x002e620000201800 */ /*0880*/ FSEL R24, R22, -3.68934881474191032320e+19, P0 ; /* 0xe000000016187808 */ /* 0x001fc40000000000 */ /*0890*/ FSEL R25, R23, 
122879.9921875, P0 ; /* 0x47efffff17197808 */ /* 0x000fe20000000000 */ /*08a0*/ DMUL R22, R18, R18 ; /* 0x0000001212167228 */ /* 0x000fc80000000000 */ /*08b0*/ STG.E.64 [R14.64+0x10], R24 ; /* 0x000010180e007986 */ /* 0x0001e2000c101b04 */ /*08c0*/ FSEL R20, R20, -3.68934881474191032320e+19, P0 ; /* 0xe000000014147808 */ /* 0x002fe40000000000 */ /*08d0*/ FSEL R21, R21, 122879.9921875, P0 ; /* 0x47efffff15157808 */ /* 0x000fca0000000000 */ /*08e0*/ STG.E.64 [R14.64+0x8], R20 ; /* 0x000008140e007986 */ /* 0x0003e2000c101b04 */ /*08f0*/ DMUL R24, R16, R16 ; /* 0x0000001010187228 */ /* 0x001e0c0000000000 */ /*0900*/ DFMA R28, RZ, R24, R22 ; /* 0x00000018ff1c722b */ /* 0x001e080000000016 */ /*0910*/ DFMA R22, RZ, R22, -R24 ; /* 0x00000016ff16722b */ /* 0x000e880000000818 */ /*0920*/ DFMA R28, RZ, R8, R28 ; /* 0x00000008ff1c722b */ /* 0x001fc8000000001c */ /*0930*/ DADD R24, -R8, R22 ; /* 0x0000000008187229 */ /* 0x004fc80000000116 */ /*0940*/ DMUL R22, R18, R16 ; /* 0x0000001012167228 */ /* 0x000e0c0000000000 */ /*0950*/ DADD R2, -R22, R24 ; /* 0x0000000016027229 */ /* 0x001e080000000118 */ /*0960*/ DFMA R28, RZ, R22, R28 ; /* 0x00000016ff1c722b */ /* 0x000e88000000001c */ /*0970*/ DFMA R2, -R6, R18, R2 ; /* 0x000000120602722b */ /* 0x001e080000000102 */ /*0980*/ DFMA R28, -R6, R18, R28 ; /* 0x00000012061c722b */ /* 0x004e88000000011c */ /*0990*/ DFMA R2, -R6, R16, R2 ; /* 0x000000100602722b */ /* 0x001e080000000102 */ /*09a0*/ DFMA R28, R6, R16, R28 ; /* 0x00000010061c722b */ /* 0x004e88000000001c */ /*09b0*/ DFMA R2, RZ, R18, R2 ; /* 0x00000012ff02722b */ /* 0x001e080000000002 */ /*09c0*/ DADD R28, -R18, R28 ; /* 0x00000000121c7229 */ /* 0x004e88000000011c */ /*09d0*/ DADD R2, -R16, R2 ; /* 0x0000000010027229 */ /* 0x001e080000000102 */ /*09e0*/ DFMA R16, RZ, R16, R28 ; /* 0x00000010ff10722b */ /* 0x0044e4000000001c */ /*09f0*/ IADD3 R29, R26.reuse, 0x3, RZ ; /* 0x000000031a1d7810 */ /* 0x044fe40007ffe0ff */ /*0a00*/ DFMA R18, RZ, R6.reuse, R2 ; /* 0x00000006ff12722b 
*/ /* 0x081e220000000002 */ /*0a10*/ IADD3 R26, R26, 0x4, RZ ; /* 0x000000041a1a7810 */ /* 0x000fe40007ffe0ff */ /*0a20*/ ISETP.GE.U32.AND P0, PT, R29, 0x64, PT ; /* 0x000000641d00780c */ /* 0x000fe20003f06070 */ /*0a30*/ DFMA R16, RZ, R6, R16 ; /* 0x00000006ff10722b */ /* 0x008ea40000000010 */ /*0a40*/ F2F.F32.F64 R28, R18 ; /* 0x00000012001c7310 */ /* 0x001e300000301000 */ /*0a50*/ F2F.F32.F64 R27, R16 ; /* 0x00000010001b7310 */ /* 0x004eb00000301000 */ /*0a60*/ F2F.F64.F32 R22, R28 ; /* 0x0000001c00167310 */ /* 0x001e300000201800 */ /*0a70*/ F2F.F64.F32 R2, R27 ; /* 0x0000001b00027310 */ /* 0x004ea20000201800 */ /*0a80*/ DFMA R22, R22, R12, 450 ; /* 0x407c20001616742b */ /* 0x001e08000000000c */ /*0a90*/ DFMA R2, R2, R12, 800 ; /* 0x408900000202742b */ /* 0x004e8c000000000c */ /*0aa0*/ F2F.F32.F64 R22, R22 ; /* 0x0000001600167310 */ /* 0x001e300000301000 */ /*0ab0*/ F2F.F32.F64 R2, R2 ; /* 0x0000000200027310 */ /* 0x004eb00000301000 */ /*0ac0*/ F2F.F64.F32 R24, R22 ; /* 0x0000001600187310 */ /* 0x001e300000201800 */ /*0ad0*/ F2F.F64.F32 R12, R2 ; /* 0x00000002000c7310 */ /* 0x0044e20000201800 */ /*0ae0*/ FSEL R24, R24, -3.68934881474191032320e+19, P0 ; /* 0xe000000018187808 */ /* 0x001fc40000000000 */ /*0af0*/ FSEL R25, R25, 122879.9921875, P0 ; /* 0x47efffff19197808 */ /* 0x000fe40000000000 */ /*0b00*/ IADD3 R2, P2, R14, 0x40, RZ ; /* 0x000000400e027810 */ /* 0x004fc60007f5e0ff */ /*0b10*/ STG.E.64 [R14.64+0x20], R24 ; /* 0x000020180e007986 */ /* 0x0003e2000c101b04 */ /*0b20*/ FSEL R12, R12, -3.68934881474191032320e+19, P0 ; /* 0xe00000000c0c7808 */ /* 0x008fe20000000000 */ /*0b30*/ IMAD.X R3, RZ, RZ, R15, P2 ; /* 0x000000ffff037224 */ /* 0x000fe200010e060f */ /*0b40*/ FSEL R13, R13, 122879.9921875, P0 ; /* 0x47efffff0d0d7808 */ /* 0x000fe40000000000 */ /*0b50*/ ISETP.NE.AND P0, PT, R26, 0x320, PT ; /* 0x000003201a00780c */ /* 0x000fc60003f05270 */ /*0b60*/ STG.E.64 [R14.64+0x18], R12 ; /* 0x0000180c0e007986 */ /* 0x0003f4000c101b04 */ /*0b70*/ @P0 BRA 0x1a0 ; 
/* 0xfffff62000000947 */ /* 0x000fea000383ffff */ /*0b80*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff037624 */ /* 0x000fc800078e00ff */ /*0b90*/ IMAD R0, R3, c[0x0][0xc], R0 ; /* 0x0000030003007a24 */ /* 0x000fca00078e0200 */ /*0ba0*/ ISETP.GE.AND P0, PT, R0, 0x3e8, PT ; /* 0x000003e80000780c */ /* 0x000fda0003f06270 */ /*0bb0*/ @!P0 BRA 0xc0 ; /* 0xfffff50000008947 */ /* 0x000fea000383ffff */ /*0bc0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0bd0*/ BRA 0xbd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0be0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17compute_each_stepP8Vector2fd .globl _Z17compute_each_stepP8Vector2fd .p2align 8 .type _Z17compute_each_stepP8Vector2fd,@function _Z17compute_each_stepP8Vector2fd: s_load_b32 s4, s[0:1], 0x1c s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0x3e8, v1 s_cbranch_execz .LBB0_5 s_load_b32 s11, s[2:3], 0x0 s_load_b128 s[0:3], s[0:1], 0x0 s_mul_i32 s6, s15, s8 s_mov_b32 s4, 0 s_mul_i32 s9, s6, 0x320 s_mov_b32 s6, 0 v_mad_u32_u24 v0, v0, 0x320, s9 s_mov_b32 s10, 0 s_mov_b32 s5, 0x40690000 s_mov_b32 s7, 0x407c2000 s_mov_b32 s9, 0x3e7ad7f2 s_waitcnt lgkmcnt(0) s_mul_i32 s11, s11, s8 s_mov_b32 s8, 0x9abcaf48 s_mul_i32 s12, s11, 0x320 .LBB0_2: v_cvt_f64_i32_e32 v[2:3], v1 s_mov_b32 s13, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[2:3], v[2:3], s[8:9], s[2:3] v_mul_f64 v[4:5], v[2:3], v[2:3] v_mul_f64 v[8:9], v[2:3], 0 v_dual_mov_b32 v13, v3 :: v_dual_mov_b32 v12, v2 v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2 s_delay_alu instid0(VALU_DEP_4) v_mul_f64 v[6:7], v[4:5], 0 .LBB0_3: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[14:15], v[12:13], v[12:13] v_mul_f64 v[16:17], v[10:11], v[10:11] s_cmpk_lt_u32 s13, 0x64 s_cselect_b32 s14, -1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_fma_f64 v[18:19], v[14:15], 0, -v[16:17] v_fma_f64 v[14:15], v[16:17], 0, v[14:15] v_mul_f64 v[16:17], v[10:11], v[12:13] v_add_f64 v[18:19], v[18:19], -v[4:5] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[14:15], v[6:7], v[14:15] v_fma_f64 v[18:19], -v[10:11], v[12:13], v[18:19] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], v[16:17], 0, v[14:15] v_fma_f64 v[16:17], v[2:3], -v[12:13], v[18:19] 
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], -v[2:3], v[12:13], v[14:15] v_fma_f64 v[16:17], -v[2:3], v[10:11], v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[14:15], v[2:3], v[10:11], v[14:15] v_fma_f64 v[16:17], v[12:13], 0, v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[14:15], -v[12:13] v_add_f64 v[14:15], v[16:17], -v[10:11] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[12:13], v[10:11], 0, v[12:13] v_add_f64 v[10:11], v[8:9], v[14:15] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[8:9], v[12:13] v_cvt_f32_f64_e32 v14, v[10:11] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f32_f64_e32 v16, v[12:13] v_cvt_f64_f32_e32 v[14:15], v14 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f64_f32_e32 v[16:17], v16 v_fma_f64 v[14:15], v[14:15], 0x40690000, s[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f64 v[16:17], v[16:17], s[4:5], 0x40890000 v_cvt_f32_f64_e32 v14, v[14:15] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f32_f64_e32 v16, v[16:17] v_cvt_f64_f32_e32 v[14:15], v14 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cvt_f64_f32_e32 v[18:19], v16 v_add_nc_u32_e32 v16, s13, v0 s_add_i32 s13, s13, 1 s_cmpk_eq_i32 s13, 0x320 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v17, 31, v16 v_lshlrev_b64 v[16:17], 4, v[16:17] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v20, vcc_lo, s0, v16 v_add_co_ci_u32_e32 v21, vcc_lo, s1, v17, vcc_lo v_cndmask_b32_e64 v17, v15, 0x47efffff, s14 v_cndmask_b32_e64 v16, v14, 0xe0000000, s14 v_cndmask_b32_e64 v15, v19, 0x47efffff, s14 v_cndmask_b32_e64 v14, v18, 0xe0000000, s14 
global_store_b128 v[20:21], v[14:17], off s_cbranch_scc0 .LBB0_3 v_add_nc_u32_e32 v1, s11, v1 v_add_nc_u32_e32 v0, s12, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v1 s_or_b32 s10, vcc_lo, s10 s_and_not1_b32 exec_lo, exec_lo, s10 s_cbranch_execnz .LBB0_2 .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17compute_each_stepP8Vector2fd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 22 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17compute_each_stepP8Vector2fd, .Lfunc_end0-_Z17compute_each_stepP8Vector2fd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident 
"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 8 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17compute_each_stepP8Vector2fd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17compute_each_stepP8Vector2fd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 22 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock); printf(" Multiproc count : %d \n",prop.multiProcessorCount); printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Registres per block : %d \n",prop.regsPerBlock); printf("\n"); } }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock); printf(" Multiproc count : %d \n",prop.multiProcessorCount); printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Registres per block : %d \n",prop.regsPerBlock); printf("\n"); } }
.file "tmpxft_000d87b6_00000000-6_ConfDevice.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Device Number: %d\n" .LC1: .string " Device name: %s\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string " Memory Clock Rate (KHz): %d\n" .section .rodata.str1.1 .LC3: .string " Global Memory (bytes): %ld\n" .section .rodata.str1.8 .align 8 .LC4: .string " Memory Bus Width (bits): %d\n" .align 8 .LC6: .string " Peak Memory Bandwidth (GB/s): %f\n" .align 8 .LC7: .string " Max Thread per block : %d \n" .section .rodata.str1.1 .LC8: .string " Multiproc count : %d \n" .LC9: .string " Max Grid size : %d %d %d \n" .section .rodata.str1.8 .align 8 .LC10: .string " Max thread dim : %d %d %d \n" .section .rodata.str1.1 .LC11: .string " Registres per block : %d \n" .LC12: .string "\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $1056, %rsp .cfi_def_cfa_offset 1104 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax leaq 12(%rsp), %rdi call cudaGetDeviceCount@PLT cmpl $0, 12(%rsp) jle .L4 movl $0, %ebx leaq .LC0(%rip), %r13 leaq .LC1(%rip), %r12 leaq .LC2(%rip), %rbp .L5: leaq 16(%rsp), %r14 movl %ebx, %esi movq %r14, %rdi call cudaGetDeviceProperties_v2@PLT movl %ebx, %edx movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r14, %rdx movq 
%r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 624(%rsp), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 304(%rsp), %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 628(%rsp), %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT pxor %xmm0, %xmm0 cvtsi2sdl 624(%rsp), %xmm0 addsd %xmm0, %xmm0 movl 628(%rsp), %edx leal 7(%rdx), %eax testl %edx, %edx cmovns %edx, %eax sarl $3, %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 mulsd %xmm1, %xmm0 divsd .LC5(%rip), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl 336(%rsp), %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 404(%rsp), %edx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 360(%rsp), %r8d movl 356(%rsp), %ecx movl 352(%rsp), %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 348(%rsp), %r8d movl 344(%rsp), %ecx movl 340(%rsp), %edx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 320(%rsp), %edx leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebx, 12(%rsp) jg .L5 .L4: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L9 movl $0, %eax addq $1056, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call 
__cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC5: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock); printf(" Multiproc count : %d \n",prop.multiProcessorCount); printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Registres per block : %d \n",prop.regsPerBlock); printf("\n"); } }
#include <hip/hip_runtime.h> #include <stdio.h> int main() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock); printf(" Multiproc count : %d \n",prop.multiProcessorCount); printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Registres per block : %d \n",prop.regsPerBlock); printf("\n"); } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>

// HIP port of the CUDA device-query program: enumerates every HIP device
// and prints its key hardware properties (memory clock, global memory,
// bus width, derived peak bandwidth, launch-configuration limits).
int main() {
    int nDevices;
    // NOTE(review): the hipGetDeviceCount/hipGetDeviceProperties return
    // codes are ignored; on failure the printed values are unspecified.
    hipGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // Peak BW = 2 (DDR) * clock(KHz) * bus-width(bytes), scaled to GB/s.
        printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock);
        printf(" Multiproc count : %d \n",prop.multiProcessorCount);
        printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf(" Registres per block : %d \n",prop.regsPerBlock);
        printf("\n");
    }
}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>

// HIP port of the CUDA device-query program: enumerates every HIP device
// and prints its key hardware properties (memory clock, global memory,
// bus width, derived peak bandwidth, launch-configuration limits).
int main() {
    int nDevices;
    // NOTE(review): the hipGetDeviceCount/hipGetDeviceProperties return
    // codes are ignored; on failure the printed values are unspecified.
    hipGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Global Memory (bytes): %ld\n", prop.totalGlobalMem);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // Peak BW = 2 (DDR) * clock(KHz) * bus-width(bytes), scaled to GB/s.
        printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Max Thread per block : %d \n",prop.maxThreadsPerBlock);
        printf(" Multiproc count : %d \n",prop.multiProcessorCount);
        printf(" Max Grid size : %d %d %d \n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf(" Max thread dim : %d %d %d \n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf(" Registres per block : %d \n",prop.regsPerBlock);
        printf("\n");
    }
}
.text .file "ConfDevice.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI0_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $1480, %rsp # imm = 0x5C8 .cfi_def_cfa_offset 1504 .cfi_offset %rbx, -24 .cfi_offset %rbp, -16 leaq 4(%rsp), %rdi callq hipGetDeviceCount cmpl $0, 4(%rsp) jle .LBB0_3 # %bb.1: # %.lr.ph leaq 8(%rsp), %rbx xorl %ebp, %ebp .p2align 4, 0x90 .LBB0_2: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl %ebp, %esi callq hipGetDevicePropertiesR0600 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf movl $.L.str.1, %edi movq %rbx, %rsi xorl %eax, %eax callq printf movl 616(%rsp), %esi movl $.L.str.2, %edi xorl %eax, %eax callq printf movq 296(%rsp), %rsi movl $.L.str.3, %edi xorl %eax, %eax callq printf movl 620(%rsp), %esi movl $.L.str.4, %edi xorl %eax, %eax callq printf cvtsi2sdl 616(%rsp), %xmm1 addsd %xmm1, %xmm1 movl 620(%rsp), %eax leal 7(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx sarl $3, %ecx xorps %xmm0, %xmm0 cvtsi2sd %ecx, %xmm0 mulsd %xmm1, %xmm0 divsd .LCPI0_0(%rip), %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf movl 328(%rsp), %esi movl $.L.str.6, %edi xorl %eax, %eax callq printf movl 396(%rsp), %esi movl $.L.str.7, %edi xorl %eax, %eax callq printf movl 344(%rsp), %esi movl 348(%rsp), %edx movl 352(%rsp), %ecx movl $.L.str.8, %edi xorl %eax, %eax callq printf movl 332(%rsp), %esi movl 336(%rsp), %edx movl 340(%rsp), %ecx movl $.L.str.9, %edi xorl %eax, %eax callq printf movl 312(%rsp), %esi movl $.L.str.10, %edi xorl %eax, %eax callq printf movl $10, %edi callq putchar@PLT incl %ebp cmpl 4(%rsp), %ebp jl .LBB0_2 .LBB0_3: # %._crit_edge xorl %eax, %eax addq $1480, %rsp # imm = 0x5C8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size main, 
.Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Device Number: %d\n" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " Device name: %s\n" .size .L.str.1, 19 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " Memory Clock Rate (KHz): %d\n" .size .L.str.2, 31 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " Global Memory (bytes): %ld\n" .size .L.str.3, 30 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " Memory Bus Width (bits): %d\n" .size .L.str.4, 31 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz " Peak Memory Bandwidth (GB/s): %f\n" .size .L.str.5, 36 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz " Max Thread per block : %d \n" .size .L.str.6, 31 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz " Multiproc count : %d \n" .size .L.str.7, 26 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz " Max Grid size : %d %d %d \n" .size .L.str.8, 30 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " Max thread dim : %d %d %d \n" .size .L.str.9, 31 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz " Registres per block : %d \n" .size .L.str.10, 30 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000d87b6_00000000-6_ConfDevice.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Device Number: %d\n" .LC1: .string " Device name: %s\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string " Memory Clock Rate (KHz): %d\n" .section .rodata.str1.1 .LC3: .string " Global Memory (bytes): %ld\n" .section .rodata.str1.8 .align 8 .LC4: .string " Memory Bus Width (bits): %d\n" .align 8 .LC6: .string " Peak Memory Bandwidth (GB/s): %f\n" .align 8 .LC7: .string " Max Thread per block : %d \n" .section .rodata.str1.1 .LC8: .string " Multiproc count : %d \n" .LC9: .string " Max Grid size : %d %d %d \n" .section .rodata.str1.8 .align 8 .LC10: .string " Max thread dim : %d %d %d \n" .section .rodata.str1.1 .LC11: .string " Registres per block : %d \n" .LC12: .string "\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $1056, %rsp .cfi_def_cfa_offset 1104 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax leaq 12(%rsp), %rdi call cudaGetDeviceCount@PLT cmpl $0, 12(%rsp) jle .L4 movl $0, %ebx leaq .LC0(%rip), %r13 leaq .LC1(%rip), %r12 leaq .LC2(%rip), %rbp .L5: leaq 16(%rsp), %r14 movl %ebx, %esi movq %r14, %rdi call cudaGetDeviceProperties_v2@PLT movl %ebx, %edx movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r14, %rdx movq 
%r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 624(%rsp), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 304(%rsp), %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 628(%rsp), %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT pxor %xmm0, %xmm0 cvtsi2sdl 624(%rsp), %xmm0 addsd %xmm0, %xmm0 movl 628(%rsp), %edx leal 7(%rdx), %eax testl %edx, %edx cmovns %edx, %eax sarl $3, %eax pxor %xmm1, %xmm1 cvtsi2sdl %eax, %xmm1 mulsd %xmm1, %xmm0 divsd .LC5(%rip), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl 336(%rsp), %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 404(%rsp), %edx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 360(%rsp), %r8d movl 356(%rsp), %ecx movl 352(%rsp), %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 348(%rsp), %r8d movl 344(%rsp), %ecx movl 340(%rsp), %edx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 320(%rsp), %edx leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebx, 12(%rsp) jg .L5 .L4: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L9 movl $0, %eax addq $1056, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call 
__cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC5: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ConfDevice.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI0_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $1480, %rsp # imm = 0x5C8 .cfi_def_cfa_offset 1504 .cfi_offset %rbx, -24 .cfi_offset %rbp, -16 leaq 4(%rsp), %rdi callq hipGetDeviceCount cmpl $0, 4(%rsp) jle .LBB0_3 # %bb.1: # %.lr.ph leaq 8(%rsp), %rbx xorl %ebp, %ebp .p2align 4, 0x90 .LBB0_2: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl %ebp, %esi callq hipGetDevicePropertiesR0600 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf movl $.L.str.1, %edi movq %rbx, %rsi xorl %eax, %eax callq printf movl 616(%rsp), %esi movl $.L.str.2, %edi xorl %eax, %eax callq printf movq 296(%rsp), %rsi movl $.L.str.3, %edi xorl %eax, %eax callq printf movl 620(%rsp), %esi movl $.L.str.4, %edi xorl %eax, %eax callq printf cvtsi2sdl 616(%rsp), %xmm1 addsd %xmm1, %xmm1 movl 620(%rsp), %eax leal 7(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx sarl $3, %ecx xorps %xmm0, %xmm0 cvtsi2sd %ecx, %xmm0 mulsd %xmm1, %xmm0 divsd .LCPI0_0(%rip), %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf movl 328(%rsp), %esi movl $.L.str.6, %edi xorl %eax, %eax callq printf movl 396(%rsp), %esi movl $.L.str.7, %edi xorl %eax, %eax callq printf movl 344(%rsp), %esi movl 348(%rsp), %edx movl 352(%rsp), %ecx movl $.L.str.8, %edi xorl %eax, %eax callq printf movl 332(%rsp), %esi movl 336(%rsp), %edx movl 340(%rsp), %ecx movl $.L.str.9, %edi xorl %eax, %eax callq printf movl 312(%rsp), %esi movl $.L.str.10, %edi xorl %eax, %eax callq printf movl $10, %edi callq putchar@PLT incl %ebp cmpl 4(%rsp), %ebp jl .LBB0_2 .LBB0_3: # %._crit_edge xorl %eax, %eax addq $1480, %rsp # imm = 0x5C8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size main, 
.Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Device Number: %d\n" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " Device name: %s\n" .size .L.str.1, 19 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " Memory Clock Rate (KHz): %d\n" .size .L.str.2, 31 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " Global Memory (bytes): %ld\n" .size .L.str.3, 30 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " Memory Bus Width (bits): %d\n" .size .L.str.4, 31 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz " Peak Memory Bandwidth (GB/s): %f\n" .size .L.str.5, 36 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz " Max Thread per block : %d \n" .size .L.str.6, 31 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz " Multiproc count : %d \n" .size .L.str.7, 26 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz " Max Grid size : %d %d %d \n" .size .L.str.8, 30 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " Max thread dim : %d %d %d \n" .size .L.str.9, 31 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz " Registres per block : %d \n" .size .L.str.10, 30 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h"

// Hillis-Steele inclusive prefix sum (scan) over d_in, written to d_out.
// Each block scans its own blockDim.x-sized segment in dynamic shared
// memory; launch with blockDim.x * sizeof(unsigned int) shared bytes.
// NOTE: right now, this is only set up for 1 block of 1024 threads.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
    // Global element index and index within this block's segment.
    int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
    int thread_x = threadIdx.x;
    extern __shared__ unsigned int segment[];

    // Stage this block's slice of the input into shared memory.
    segment[thread_x] = d_in[abs_x];
    //d_out[thread_x] = d_in[thread_x];
    __syncthreads();

    // Hillis-Steele sweep: at step i each participating thread adds the
    // value i positions to its left, with i doubling each iteration.
    // NOTE(review): segment[thread_x - i] is read in the same step other
    // threads write segment[thread_x]; without a barrier between the read
    // and the write (or double buffering) this is a data race — confirm.
    for (unsigned int i = 1; i < blockDim.x; i <<= 1) {
        if (thread_x >= i) {
            //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
            segment[thread_x] = segment[thread_x] + segment[thread_x - i];
        }
        __syncthreads();
    }

    //this happens in different blocks, so no need to syncthreads()
    if (blockIdx.x > 0) {
        //carry over the result of the last segment
        // NOTE(review): index blockDim.x * (blockIdx.x - 1) is the FIRST
        // element of the previous block's output, not its last prefix sum,
        // and nothing guarantees the previous block has already written it
        // — verify before using multi-block launches.
        segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
    }

    // Publish this thread's inclusive prefix sum.
    d_out[abs_x] = segment[thread_x];
}
code for sm_80 Function : _Z18inclusivePrefixAddPjS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R4, c[0x0][0x0], R9 ; /* 0x0000000004007a24 */ /* 0x001fc800078e0209 */ /*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0203 */ /*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe40003f05270 */ /*0090*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */ /* 0x000fc80000000f00 */ /*00a0*/ ISETP.GE.U32.AND P1, PT, R5, 0x2, PT ; /* 0x000000020500780c */ /* 0x000fce0003f26070 */ /*00b0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff070624 */ /* 0x000fe200078e00ff */ /*00c0*/ @P0 MOV R5, 0x4 ; /* 0x0000000400050802 */ /* 0x000fc60000000f00 */ /*00d0*/ @P0 IMAD R4, R4, R7, -c[0x0][0x0] ; /* 0x8000000004040624 */ /* 0x000fc800078e0207 */ /*00e0*/ @P0 IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */ /* 0x000fe200078e0005 */ /*00f0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x0041e80000004800 */ /*0100*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0110*/ @!P1 BRA 0x1d0 ; /* 0x000000b000009947 */ /* 0x000fea0003800000 */ /*0120*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */ /* 0x001fca00078e00ff */ /*0130*/ ISETP.GE.U32.AND P1, PT, R9, R2, PT ; /* 0x000000020900720c */ /* 0x000fda0003f26070 */ /*0140*/ @P1 IADD3 R6, R9, -R2, RZ ; /* 
0x8000000209061210 */ /* 0x000fe20007ffe0ff */ /*0150*/ @P1 LDS R3, [R9.X4] ; /* 0x0000000009031984 */ /* 0x000fe20000004800 */ /*0160*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */ /* 0x000fc800000006ff */ /*0170*/ @P1 LDS R6, [R6.X4] ; /* 0x0000000006061984 */ /* 0x000e240000004800 */ /*0180*/ @P1 IMAD.IADD R3, R3, 0x1, R6 ; /* 0x0000000103031824 */ /* 0x001fca00078e0206 */ /*0190*/ @P1 STS [R9.X4], R3 ; /* 0x0000000309001388 */ /* 0x0001e80000004800 */ /*01a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01b0*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */ /* 0x000fda0003f26070 */ /*01c0*/ @!P1 BRA 0x130 ; /* 0xffffff6000009947 */ /* 0x001fea000383ffff */ /*01d0*/ @P0 LDG.E R4, [R4.64] ; /* 0x0000000404040981 */ /* 0x001ea2000c1e1900 */ /*01e0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fe40000011400 */ /*01f0*/ LEA R2, P1, R0.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */ /* 0x040fe200078210ff */ /*0200*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000ea60000004800 */ /*0210*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */ /* 0x000fe200008f1403 */ /*0220*/ @P0 IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107070824 */ /* 0x004fca00078e0204 */ /*0230*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0240*/ @P0 STS [R9.X4], R7 ; /* 0x0000000709000388 */ /* 0x000fe20000004800 */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h"

// Hillis-Steele inclusive prefix sum (scan) over d_in, written to d_out.
// Each block scans its own blockDim.x-sized segment in dynamic shared
// memory; launch with blockDim.x * sizeof(unsigned int) shared bytes.
// NOTE: right now, this is only set up for 1 block of 1024 threads.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
    // Global element index and index within this block's segment.
    int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
    int thread_x = threadIdx.x;
    extern __shared__ unsigned int segment[];

    // Stage this block's slice of the input into shared memory.
    segment[thread_x] = d_in[abs_x];
    //d_out[thread_x] = d_in[thread_x];
    __syncthreads();

    // Hillis-Steele sweep: at step i each participating thread adds the
    // value i positions to its left, with i doubling each iteration.
    // NOTE(review): segment[thread_x - i] is read in the same step other
    // threads write segment[thread_x]; without a barrier between the read
    // and the write (or double buffering) this is a data race — confirm.
    for (unsigned int i = 1; i < blockDim.x; i <<= 1) {
        if (thread_x >= i) {
            //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
            segment[thread_x] = segment[thread_x] + segment[thread_x - i];
        }
        __syncthreads();
    }

    //this happens in different blocks, so no need to syncthreads()
    if (blockIdx.x > 0) {
        //carry over the result of the last segment
        // NOTE(review): index blockDim.x * (blockIdx.x - 1) is the FIRST
        // element of the previous block's output, not its last prefix sum,
        // and nothing guarantees the previous block has already written it
        // — verify before using multi-block launches.
        segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
    }

    // Publish this thread's inclusive prefix sum.
    d_out[abs_x] = segment[thread_x];
}
.file "tmpxft_0018b041_00000000-6_inclusivePrefixAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ .type _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_, @function _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z18inclusivePrefixAddPjS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_, .-_Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ .globl _Z18inclusivePrefixAddPjS_ .type _Z18inclusivePrefixAddPjS_, @function _Z18inclusivePrefixAddPjS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE2052: .size _Z18inclusivePrefixAddPjS_, .-_Z18inclusivePrefixAddPjS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z18inclusivePrefixAddPjS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18inclusivePrefixAddPjS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h"

// Hillis-Steele inclusive prefix sum (scan) over d_in, written to d_out.
// Each block scans its own blockDim.x-sized segment in dynamic shared
// memory; launch with blockDim.x * sizeof(unsigned int) shared bytes.
// NOTE: right now, this is only set up for 1 block of 1024 threads.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
    // Global element index and index within this block's segment.
    int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
    int thread_x = threadIdx.x;
    extern __shared__ unsigned int segment[];

    // Stage this block's slice of the input into shared memory.
    segment[thread_x] = d_in[abs_x];
    //d_out[thread_x] = d_in[thread_x];
    __syncthreads();

    // Hillis-Steele sweep: at step i each participating thread adds the
    // value i positions to its left, with i doubling each iteration.
    // NOTE(review): segment[thread_x - i] is read in the same step other
    // threads write segment[thread_x]; without a barrier between the read
    // and the write (or double buffering) this is a data race — confirm.
    for (unsigned int i = 1; i < blockDim.x; i <<= 1) {
        if (thread_x >= i) {
            //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
            segment[thread_x] = segment[thread_x] + segment[thread_x - i];
        }
        __syncthreads();
    }

    //this happens in different blocks, so no need to syncthreads()
    if (blockIdx.x > 0) {
        //carry over the result of the last segment
        // NOTE(review): index blockDim.x * (blockIdx.x - 1) is the FIRST
        // element of the previous block's output, not its last prefix sum,
        // and nothing guarantees the previous block has already written it
        // — verify before using multi-block launches.
        segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
    }

    // Publish this thread's inclusive prefix sum.
    d_out[abs_x] = segment[thread_x];
}
#include <hip/hip_runtime.h>
#include "includes.h"

// HIP port of the Hillis-Steele inclusive prefix sum (scan): d_in -> d_out.
// Each block scans its own blockDim.x-sized segment in dynamic shared
// memory; launch with blockDim.x * sizeof(unsigned int) shared bytes.
// NOTE: right now, this is only set up for 1 block of 1024 threads.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
    // Global element index and index within this block's segment.
    int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
    int thread_x = threadIdx.x;
    extern __shared__ unsigned int segment[];

    // Stage this block's slice of the input into shared memory.
    segment[thread_x] = d_in[abs_x];
    //d_out[thread_x] = d_in[thread_x];
    __syncthreads();

    // Hillis-Steele sweep: at step i each participating thread adds the
    // value i positions to its left, with i doubling each iteration.
    // NOTE(review): segment[thread_x - i] is read in the same step other
    // threads write segment[thread_x]; without a barrier between the read
    // and the write (or double buffering) this is a data race — confirm.
    for (unsigned int i = 1; i < blockDim.x; i <<= 1) {
        if (thread_x >= i) {
            //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
            segment[thread_x] = segment[thread_x] + segment[thread_x - i];
        }
        __syncthreads();
    }

    //this happens in different blocks, so no need to syncthreads()
    if (blockIdx.x > 0) {
        //carry over the result of the last segment
        // NOTE(review): index blockDim.x * (blockIdx.x - 1) is the FIRST
        // element of the previous block's output, not its last prefix sum,
        // and nothing guarantees the previous block has already written it
        // — verify before using multi-block launches.
        segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
    }

    // Publish this thread's inclusive prefix sum.
    d_out[abs_x] = segment[thread_x];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include "includes.h"

// HIP port of the Hillis-Steele inclusive prefix sum (scan): d_in -> d_out.
// Each block scans its own blockDim.x-sized segment in dynamic shared
// memory; launch with blockDim.x * sizeof(unsigned int) shared bytes.
// NOTE: right now, this is only set up for 1 block of 1024 threads.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
    // Global element index and index within this block's segment.
    int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
    int thread_x = threadIdx.x;
    extern __shared__ unsigned int segment[];

    // Stage this block's slice of the input into shared memory.
    segment[thread_x] = d_in[abs_x];
    //d_out[thread_x] = d_in[thread_x];
    __syncthreads();

    // Hillis-Steele sweep: at step i each participating thread adds the
    // value i positions to its left, with i doubling each iteration.
    // NOTE(review): segment[thread_x - i] is read in the same step other
    // threads write segment[thread_x]; without a barrier between the read
    // and the write (or double buffering) this is a data race — confirm.
    for (unsigned int i = 1; i < blockDim.x; i <<= 1) {
        if (thread_x >= i) {
            //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
            segment[thread_x] = segment[thread_x] + segment[thread_x - i];
        }
        __syncthreads();
    }

    //this happens in different blocks, so no need to syncthreads()
    if (blockIdx.x > 0) {
        //carry over the result of the last segment
        // NOTE(review): index blockDim.x * (blockIdx.x - 1) is the FIRST
        // element of the previous block's output, not its last prefix sum,
        // and nothing guarantees the previous block has already written it
        // — verify before using multi-block launches.
        segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
    }

    // Publish this thread's inclusive prefix sum.
    d_out[abs_x] = segment[thread_x];
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18inclusivePrefixAddPjS_ .globl _Z18inclusivePrefixAddPjS_ .p2align 8 .type _Z18inclusivePrefixAddPjS_,@function _Z18inclusivePrefixAddPjS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b64 s[4:5], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_cmp_lt_u32 s2, 2 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_co_u32 v3, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo global_load_b32 v4, v[3:4], off v_lshl_add_u32 v3, v0, 2, 0 s_waitcnt vmcnt(0) ds_store_b32 v3, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_5 s_mov_b32 s3, 1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_lshl_b32 s3, s3, 1 s_waitcnt lgkmcnt(0) s_cmp_ge_u32 s3, s2 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_5 .LBB0_3: s_mov_b32 s4, exec_lo v_cmpx_le_u32_e64 s3, v0 s_cbranch_execz .LBB0_2 v_subrev_nc_u32_e32 v4, s3, v0 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v4, v4, 2, 0 ds_load_b32 v5, v3 ds_load_b32 v4, v4 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, v4, v5 ds_store_b32 v3, v4 s_branch .LBB0_2 .LBB0_5: s_load_b64 s[0:1], s[0:1], 0x8 s_cmp_lg_u32 s15, 0 s_cbranch_scc0 .LBB0_7 s_add_i32 s3, s15, -1 ds_load_b32 v0, v3 s_mul_i32 s2, s3, s2 s_mov_b32 s3, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s2, s0, s2 s_addc_u32 s3, s1, s3 s_load_b32 s2, s[2:3], 0x0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v0, s2, v0 ds_store_b32 v3, v0 .LBB0_7: ds_load_b32 v3, v3 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], 
v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18inclusivePrefixAddPjS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18inclusivePrefixAddPjS_, .Lfunc_end0-_Z18inclusivePrefixAddPjS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - 
.address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims - .offset: 136 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18inclusivePrefixAddPjS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18inclusivePrefixAddPjS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out) { //Hillis Steele implementation //NOTE: right now, this is only set up for 1 block of 1024 threads int abs_x = threadIdx.x + blockIdx.x * blockDim.x; int thread_x = threadIdx.x; extern __shared__ unsigned int segment[]; segment[thread_x] = d_in[abs_x]; //d_out[thread_x] = d_in[thread_x]; __syncthreads(); for (unsigned int i = 1; i < blockDim.x; i <<= 1) { if (thread_x >= i) { //d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i]; segment[thread_x] = segment[thread_x] + segment[thread_x - i]; } __syncthreads(); } //this happens in different blocks, so no need to syncthreads() if (blockIdx.x > 0) { //carry over the result of the last segment segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)]; } d_out[abs_x] = segment[thread_x]; }
.text .file "inclusivePrefixAdd.hip" .globl _Z33__device_stub__inclusivePrefixAddPjS_ # -- Begin function _Z33__device_stub__inclusivePrefixAddPjS_ .p2align 4, 0x90 .type _Z33__device_stub__inclusivePrefixAddPjS_,@function _Z33__device_stub__inclusivePrefixAddPjS_: # @_Z33__device_stub__inclusivePrefixAddPjS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z18inclusivePrefixAddPjS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z33__device_stub__inclusivePrefixAddPjS_, .Lfunc_end0-_Z33__device_stub__inclusivePrefixAddPjS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18inclusivePrefixAddPjS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z18inclusivePrefixAddPjS_,@object # @_Z18inclusivePrefixAddPjS_ .section .rodata,"a",@progbits .globl _Z18inclusivePrefixAddPjS_ .p2align 3, 0x0 _Z18inclusivePrefixAddPjS_: .quad _Z33__device_stub__inclusivePrefixAddPjS_ .size _Z18inclusivePrefixAddPjS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z18inclusivePrefixAddPjS_" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__inclusivePrefixAddPjS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18inclusivePrefixAddPjS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z18inclusivePrefixAddPjS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R4, c[0x0][0x0], R9 ; /* 0x0000000004007a24 */ /* 0x001fc800078e0209 */ /*0060*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0203 */ /*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe40003f05270 */ /*0090*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */ /* 0x000fc80000000f00 */ /*00a0*/ ISETP.GE.U32.AND P1, PT, R5, 0x2, PT ; /* 0x000000020500780c */ /* 0x000fce0003f26070 */ /*00b0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff070624 */ /* 0x000fe200078e00ff */ /*00c0*/ @P0 MOV R5, 0x4 ; /* 0x0000000400050802 */ /* 0x000fc60000000f00 */ /*00d0*/ @P0 IMAD R4, R4, R7, -c[0x0][0x0] ; /* 0x8000000004040624 */ /* 0x000fc800078e0207 */ /*00e0*/ @P0 IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */ /* 0x000fe200078e0005 */ /*00f0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x0041e80000004800 */ /*0100*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0110*/ @!P1 BRA 0x1d0 ; /* 0x000000b000009947 */ /* 0x000fea0003800000 */ /*0120*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */ /* 0x001fca00078e00ff */ /*0130*/ ISETP.GE.U32.AND P1, PT, R9, R2, PT ; /* 0x000000020900720c */ /* 0x000fda0003f26070 */ /*0140*/ @P1 IADD3 R6, R9, -R2, RZ ; /* 
0x8000000209061210 */ /* 0x000fe20007ffe0ff */ /*0150*/ @P1 LDS R3, [R9.X4] ; /* 0x0000000009031984 */ /* 0x000fe20000004800 */ /*0160*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */ /* 0x000fc800000006ff */ /*0170*/ @P1 LDS R6, [R6.X4] ; /* 0x0000000006061984 */ /* 0x000e240000004800 */ /*0180*/ @P1 IMAD.IADD R3, R3, 0x1, R6 ; /* 0x0000000103031824 */ /* 0x001fca00078e0206 */ /*0190*/ @P1 STS [R9.X4], R3 ; /* 0x0000000309001388 */ /* 0x0001e80000004800 */ /*01a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01b0*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */ /* 0x000fda0003f26070 */ /*01c0*/ @!P1 BRA 0x130 ; /* 0xffffff6000009947 */ /* 0x001fea000383ffff */ /*01d0*/ @P0 LDG.E R4, [R4.64] ; /* 0x0000000404040981 */ /* 0x001ea2000c1e1900 */ /*01e0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fe40000011400 */ /*01f0*/ LEA R2, P1, R0.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */ /* 0x040fe200078210ff */ /*0200*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000ea60000004800 */ /*0210*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P1 ; /* 0x00005b0000037a11 */ /* 0x000fe200008f1403 */ /*0220*/ @P0 IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107070824 */ /* 0x004fca00078e0204 */ /*0230*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0240*/ @P0 STS [R9.X4], R7 ; /* 0x0000000709000388 */ /* 0x000fe20000004800 */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18inclusivePrefixAddPjS_ .globl _Z18inclusivePrefixAddPjS_ .p2align 8 .type _Z18inclusivePrefixAddPjS_,@function _Z18inclusivePrefixAddPjS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b64 s[4:5], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_cmp_lt_u32 s2, 2 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_co_u32 v3, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo global_load_b32 v4, v[3:4], off v_lshl_add_u32 v3, v0, 2, 0 s_waitcnt vmcnt(0) ds_store_b32 v3, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_5 s_mov_b32 s3, 1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_lshl_b32 s3, s3, 1 s_waitcnt lgkmcnt(0) s_cmp_ge_u32 s3, s2 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_5 .LBB0_3: s_mov_b32 s4, exec_lo v_cmpx_le_u32_e64 s3, v0 s_cbranch_execz .LBB0_2 v_subrev_nc_u32_e32 v4, s3, v0 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v4, v4, 2, 0 ds_load_b32 v5, v3 ds_load_b32 v4, v4 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, v4, v5 ds_store_b32 v3, v4 s_branch .LBB0_2 .LBB0_5: s_load_b64 s[0:1], s[0:1], 0x8 s_cmp_lg_u32 s15, 0 s_cbranch_scc0 .LBB0_7 s_add_i32 s3, s15, -1 ds_load_b32 v0, v3 s_mul_i32 s2, s3, s2 s_mov_b32 s3, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s2, s0, s2 s_addc_u32 s3, s1, s3 s_load_b32 s2, s[2:3], 0x0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v0, s2, v0 ds_store_b32 v3, v0 .LBB0_7: ds_load_b32 v3, v3 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], 
v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18inclusivePrefixAddPjS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18inclusivePrefixAddPjS_, .Lfunc_end0-_Z18inclusivePrefixAddPjS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - 
.address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims - .offset: 136 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18inclusivePrefixAddPjS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18inclusivePrefixAddPjS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0018b041_00000000-6_inclusivePrefixAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ .type _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_, @function _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z18inclusivePrefixAddPjS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_, .-_Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ .globl _Z18inclusivePrefixAddPjS_ .type _Z18inclusivePrefixAddPjS_, @function _Z18inclusivePrefixAddPjS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z18inclusivePrefixAddPjS_PjS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc 
.LFE2052: .size _Z18inclusivePrefixAddPjS_, .-_Z18inclusivePrefixAddPjS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z18inclusivePrefixAddPjS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18inclusivePrefixAddPjS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "inclusivePrefixAdd.hip" .globl _Z33__device_stub__inclusivePrefixAddPjS_ # -- Begin function _Z33__device_stub__inclusivePrefixAddPjS_ .p2align 4, 0x90 .type _Z33__device_stub__inclusivePrefixAddPjS_,@function _Z33__device_stub__inclusivePrefixAddPjS_: # @_Z33__device_stub__inclusivePrefixAddPjS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z18inclusivePrefixAddPjS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z33__device_stub__inclusivePrefixAddPjS_, .Lfunc_end0-_Z33__device_stub__inclusivePrefixAddPjS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18inclusivePrefixAddPjS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z18inclusivePrefixAddPjS_,@object # @_Z18inclusivePrefixAddPjS_ .section .rodata,"a",@progbits .globl _Z18inclusivePrefixAddPjS_ .p2align 3, 0x0 _Z18inclusivePrefixAddPjS_: .quad _Z33__device_stub__inclusivePrefixAddPjS_ .size _Z18inclusivePrefixAddPjS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z18inclusivePrefixAddPjS_" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__inclusivePrefixAddPjS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18inclusivePrefixAddPjS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
code for sm_80 Function : _Z6updatePfS_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e240000002200 */ /*0030*/ IADD3 R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a10 */ /* 0x001fc80007ffe000 */ /*0040*/ IADD3 R2, R3, 0x1, RZ ; /* 0x0000000103027810 */ /* 0x000fc80007ffe0ff */ /*0050*/ ISETP.GT.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x000fc80003f04270 */ /*0060*/ ISETP.GT.OR P0, PT, R2, c[0x0][0x174], !P0 ; /* 0x00005d0002007a0c */ /* 0x000fda0004704670 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e220000002100 */ /*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00a0*/ BSSY B0, 0x160 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*00b0*/ ISETP.NE.AND P0, PT, R2.reuse, RZ, PT ; /* 0x000000ff0200720c */ /* 0x041fe40003f05270 */ /*00c0*/ ISETP.GE.AND P1, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */ /* 0x000fd60003f26270 */ /*00d0*/ @P0 BRA 0x150 ; /* 0x0000007000000947 */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff047624 */ /* 0x000fe200078e00ff */ /*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc600000001ff */ /*0100*/ IMAD R4, R3, R4, c[0x0][0x178] ; /* 0x00005e0003047624 */ /* 0x000fca00078e0204 */ /*0110*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fca0007ffe0ff */ /*0120*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fcc00078e0205 */ /*0130*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*0140*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 
0x0041e40000004800 */ /*0150*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0160*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff057624 */ /* 0x001fe200078e00ff */ /*0180*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0190*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x000fe400078e00ff */ /*01a0*/ IMAD R6, R3, c[0x0][0x170], R2 ; /* 0x00005c0003067a24 */ /* 0x000fe200078e0202 */ /*01b0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*01c0*/ IADD3 R3, R2, 0x1, RZ ; /* 0x0000000102037810 */ /* 0x000fe20007ffe0ff */ /*01d0*/ IMAD.WIDE R6, R6, R4, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0204 */ /*01e0*/ IMAD R5, R5, c[0x0][0x170], R2 ; /* 0x00005c0005057a24 */ /* 0x000fe200078e0202 */ /*01f0*/ MOV R11, R7 ; /* 0x00000007000b7202 */ /* 0x000fc60000000f00 */ /*0200*/ IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fc800078e0204 */ /*0210*/ IMAD.MOV.U32 R10, RZ, RZ, R4 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0004 */ /*0220*/ IMAD.MOV.U32 R13, RZ, RZ, R5 ; /* 0x000000ffff0d7224 */ /* 0x000fc600078e0005 */ /*0230*/ @!P0 MOV R4, R10 ; /* 0x0000000a00048202 */ /* 0x001fe20000000f00 */ /*0240*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff058224 */ /* 0x000fca00078e000d */ /*0250*/ @!P0 LDG.E R7, [R4.64] ; /* 0x0000000404078981 */ /* 0x0000a2000c1e1900 */ /*0260*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0270*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x001fe200078e0006 */ /*0280*/ MOV R5, R11 ; /* 0x0000000b00057202 */ /* 0x000fe20000000f00 */ /*0290*/ @!P0 STS [R2.X4+0x4], R7 ; /* 0x0000040702008388 */ /* 0x004fe80000004800 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */ /* 0x000ea2000c1e1900 */ 
/*02c0*/ IADD3 R3, R3, 0x200, RZ ; /* 0x0000020003037810 */ /* 0x000fc40007ffe0ff */ /*02d0*/ IADD3 R10, P2, R10, 0x800, RZ ; /* 0x000008000a0a7810 */ /* 0x000fe20007f5e0ff */ /*02e0*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */ /* 0x000fe20000004800 */ /*02f0*/ ISETP.GT.AND P1, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x000fe40003f24270 */ /*0300*/ IADD3.X R13, RZ, R13, RZ, P2, !PT ; /* 0x0000000dff0d7210 */ /* 0x000fe200017fe4ff */ /*0310*/ LDS R9, [R2.X4+0x4] ; /* 0x0000040002097984 */ /* 0x000ea40000004800 */ /*0320*/ FFMA R9, R6, -R9, R8 ; /* 0x8000000906097223 */ /* 0x004fe20000000008 */ /*0330*/ IADD3 R6, P3, R4, 0x800, RZ ; /* 0x0000080004067810 */ /* 0x000fc80007f7e0ff */ /*0340*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x0001e2000c101904 */ /*0350*/ IMAD.X R11, RZ, RZ, R5, P3 ; /* 0x000000ffff0b7224 */ /* 0x000fe400018e0605 */ /*0360*/ @!P1 BRA 0x230 ; /* 0xfffffec000009947 */ /* 0x000fea000383ffff */ /*0370*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0380*/ BRA 0x380; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
.file "tmpxft_000d72a7_00000000-6_update.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z6updatePfS_iiiPfS_iii .type _Z30__device_stub__Z6updatePfS_iiiPfS_iii, @function _Z30__device_stub__Z6updatePfS_iiiPfS_iii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6updatePfS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z6updatePfS_iiiPfS_iii, .-_Z30__device_stub__Z6updatePfS_iiiPfS_iii .globl _Z6updatePfS_iii .type _Z6updatePfS_iii, @function _Z6updatePfS_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z30__device_stub__Z6updatePfS_iiiPfS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z6updatePfS_iii, .-_Z6updatePfS_iii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6updatePfS_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6updatePfS_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
#include <hip/hip_runtime.h> #include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6updatePfS_iii .globl _Z6updatePfS_iii .p2align 8 .type _Z6updatePfS_iii,@function _Z6updatePfS_iii: s_load_b64 s[4:5], s[0:1], 0x14 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v1, s5, s15, v3 v_add_nc_u32_e32 v2, 1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_lt_i32_e32 vcc_lo, s5, v2 v_cmp_ge_i32_e64 s2, s4, v2 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_8 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_3 v_mul_lo_u32 v2, v1, s4 s_load_b64 s[6:7], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, s5, -1, v2 v_ashrrev_i32_e32 v5, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s6, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo global_load_b32 v2, v[4:5], off v_lshlrev_b32_e32 v4, 2, v3 s_waitcnt vmcnt(0) ds_store_b32 v4, v2 offset:2048 .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 s_load_b32 s4, s[0:1], 0x10 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e32 vcc_lo, s4, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_8 s_load_b64 s[2:3], s[0:1], 0x0 v_mul_lo_u32 v1, v1, s4 v_cmp_eq_u32_e32 vcc_lo, 0, v3 v_lshlrev_b32_e32 v2, 2, v0 v_lshl_add_u32 v3, v3, 2, 0x800 s_add_i32 s5, s5, -1 s_mov_b32 s1, 0 s_mul_i32 s5, s5, s4 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_6 .p2align 6 .LBB0_5: s_or_b32 exec_lo, exec_lo, s6 v_add_nc_u32_e32 v4, v1, v0 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv v_add_nc_u32_e32 v9, 0x201, v0 v_ashrrev_i32_e32 v5, 31, v4 v_add_nc_u32_e32 v0, 0x200, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, s0, s2, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 v_cmp_lt_i32_e64 s0, s4, v9 global_load_b32 v6, v[4:5], off ds_load_b32 v7, v2 ds_load_b32 v8, v3 s_or_b32 s1, s0, s1 s_waitcnt vmcnt(0) lgkmcnt(0) v_fma_f32 v6, -v7, v8, v6 global_store_b32 v[4:5], v6, off s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execz .LBB0_8 .LBB0_6: s_and_saveexec_b32 s6, vcc_lo s_cbranch_execz .LBB0_5 v_add_nc_u32_e32 v4, s5, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v5, 31, v4 v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v4, s0, s2, v4 v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v2, v4 s_branch .LBB0_5 .LBB0_8: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6updatePfS_iii .amdhsa_group_segment_fixed_size 2052 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6updatePfS_iii, .Lfunc_end0-_Z6updatePfS_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 2052 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6updatePfS_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6updatePfS_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" /* Copyright (C) 2009-2012 Fraunhofer SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany; all rights reserved unless otherwise stated. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** Index function to address the two-dimensional arrays Q and R Matrices are stored in column-major order (like Fortran). i is the row, j is the column (index starts at 1) ld is the number of elements for each column */ #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* ---------------------------------------------------------------------- */ /* Tuning can be done by different block sizes. */ #define BLOCK1 64 // 8800 GT: 128 x 1 // C1060: 128 x 1 #define BLOCK1X 64 #define BLOCK1Y 8 // 8800 GT: 64 x 4 // C1060: 64 x 8 #define BLOCK2X 512 #define BLOCK2Y 1 /* ---------------------------------------------------------------------- */ /** Kernel for matrix-vector multiplication R(k,k:n) = matmulv( Q(1:m,k:n), Q(1:m) ) Same as this BLAS-2 call: call sgemv('T', m, n-k+1, 1.0, Q(1,k), M, Q(1,k), 1, 0.0, R(k,k), N) The threads in x-dimension are used for parallelization of the dot products, the threads in y-dimension compute different elements of the result vector. Each thread (t1,t2) will be responsible for BLOCK1X columns and BLOCK1Y rows of the matrix Q. 
*/ /* ---------------------------------------------------------------------- */ /** This kernel scales the row k of the matrix R R(k,k:n) = R(k,k:n) * S */ /* ---------------------------------------------------------------------- */ /** This kernel scales the column k of the matrix Q. Q(1:m,k) = Q(1:m,k) * S */ /* ---------------------------------------------------------------------- */ /** This kernel updates the matrix Q by a product of two vectors. Q(1:m,k+1:n) -= R(k,k+1:n) * Q(1:m,k) same as this BLAS-2 call: call sger(M, N-K, -1.0, Q(1,K), 1, R(K,K+1), N, Q(1,K+1), M) Each thread (t1,t2) will be responsible for BLOCK2X columns and BLOCK2Y rows of the matrix Q. */ /* ---------------------------------------------------------------------- */ /** QR factorization of a matrix @param[in] m is number of rows for Q and R @param[in] n is number of columns for Q and R @param[in,out] Q is a matrix of size m x n, column major order @param[out] R is a matrix of size m x n, column major order @returns 0 if successful Q(in) = Q(out) * R, where Q(out) is orthonormal and R upper-triangular */ __global__ void update(float* Q, float* R, int m, int n, int k) { __shared__ float RK[BLOCK2Y]; __shared__ float QK[BLOCK2X]; int tid1 = threadIdx.x; int tid2 = threadIdx.y; int j = blockIdx.y * BLOCK2Y + tid2 + k + 1; if (j < k+1 or j > n) return; if (tid1 == 0) { RK[tid2] = R[IDX2F(k,j,n)]; } for (int i = tid1 + 1; i <= m; i += BLOCK2X ) { if (tid2 == 0) { QK[tid1] = Q[IDX2F(i,k,m)]; } __syncthreads(); Q[IDX2F(i,j,m)] -= QK[tid1] * RK[tid2]; } }
.text .file "update.hip" .globl _Z21__device_stub__updatePfS_iii # -- Begin function _Z21__device_stub__updatePfS_iii .p2align 4, 0x90 .type _Z21__device_stub__updatePfS_iii,@function _Z21__device_stub__updatePfS_iii: # @_Z21__device_stub__updatePfS_iii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6updatePfS_iii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z21__device_stub__updatePfS_iii, .Lfunc_end0-_Z21__device_stub__updatePfS_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6updatePfS_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6updatePfS_iii,@object # @_Z6updatePfS_iii .section .rodata,"a",@progbits .globl _Z6updatePfS_iii .p2align 3, 0x0 _Z6updatePfS_iii: .quad _Z21__device_stub__updatePfS_iii .size _Z6updatePfS_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6updatePfS_iii" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__updatePfS_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6updatePfS_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6updatePfS_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e240000002200 */ /*0030*/ IADD3 R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a10 */ /* 0x001fc80007ffe000 */ /*0040*/ IADD3 R2, R3, 0x1, RZ ; /* 0x0000000103027810 */ /* 0x000fc80007ffe0ff */ /*0050*/ ISETP.GT.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x000fc80003f04270 */ /*0060*/ ISETP.GT.OR P0, PT, R2, c[0x0][0x174], !P0 ; /* 0x00005d0002007a0c */ /* 0x000fda0004704670 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e220000002100 */ /*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00a0*/ BSSY B0, 0x160 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*00b0*/ ISETP.NE.AND P0, PT, R2.reuse, RZ, PT ; /* 0x000000ff0200720c */ /* 0x041fe40003f05270 */ /*00c0*/ ISETP.GE.AND P1, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */ /* 0x000fd60003f26270 */ /*00d0*/ @P0 BRA 0x150 ; /* 0x0000007000000947 */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff047624 */ /* 0x000fe200078e00ff */ /*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc600000001ff */ /*0100*/ IMAD R4, R3, R4, c[0x0][0x178] ; /* 0x00005e0003047624 */ /* 0x000fca00078e0204 */ /*0110*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fca0007ffe0ff */ /*0120*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fcc00078e0205 */ /*0130*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*0140*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 
0x0041e40000004800 */ /*0150*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0160*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*0170*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff057624 */ /* 0x001fe200078e00ff */ /*0180*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*0190*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x000fe400078e00ff */ /*01a0*/ IMAD R6, R3, c[0x0][0x170], R2 ; /* 0x00005c0003067a24 */ /* 0x000fe200078e0202 */ /*01b0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*01c0*/ IADD3 R3, R2, 0x1, RZ ; /* 0x0000000102037810 */ /* 0x000fe20007ffe0ff */ /*01d0*/ IMAD.WIDE R6, R6, R4, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0204 */ /*01e0*/ IMAD R5, R5, c[0x0][0x170], R2 ; /* 0x00005c0005057a24 */ /* 0x000fe200078e0202 */ /*01f0*/ MOV R11, R7 ; /* 0x00000007000b7202 */ /* 0x000fc60000000f00 */ /*0200*/ IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fc800078e0204 */ /*0210*/ IMAD.MOV.U32 R10, RZ, RZ, R4 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0004 */ /*0220*/ IMAD.MOV.U32 R13, RZ, RZ, R5 ; /* 0x000000ffff0d7224 */ /* 0x000fc600078e0005 */ /*0230*/ @!P0 MOV R4, R10 ; /* 0x0000000a00048202 */ /* 0x001fe20000000f00 */ /*0240*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff058224 */ /* 0x000fca00078e000d */ /*0250*/ @!P0 LDG.E R7, [R4.64] ; /* 0x0000000404078981 */ /* 0x0000a2000c1e1900 */ /*0260*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0270*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x001fe200078e0006 */ /*0280*/ MOV R5, R11 ; /* 0x0000000b00057202 */ /* 0x000fe20000000f00 */ /*0290*/ @!P0 STS [R2.X4+0x4], R7 ; /* 0x0000040702008388 */ /* 0x004fe80000004800 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */ /* 0x000ea2000c1e1900 */ 
/*02c0*/ IADD3 R3, R3, 0x200, RZ ; /* 0x0000020003037810 */ /* 0x000fc40007ffe0ff */ /*02d0*/ IADD3 R10, P2, R10, 0x800, RZ ; /* 0x000008000a0a7810 */ /* 0x000fe20007f5e0ff */ /*02e0*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */ /* 0x000fe20000004800 */ /*02f0*/ ISETP.GT.AND P1, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */ /* 0x000fe40003f24270 */ /*0300*/ IADD3.X R13, RZ, R13, RZ, P2, !PT ; /* 0x0000000dff0d7210 */ /* 0x000fe200017fe4ff */ /*0310*/ LDS R9, [R2.X4+0x4] ; /* 0x0000040002097984 */ /* 0x000ea40000004800 */ /*0320*/ FFMA R9, R6, -R9, R8 ; /* 0x8000000906097223 */ /* 0x004fe20000000008 */ /*0330*/ IADD3 R6, P3, R4, 0x800, RZ ; /* 0x0000080004067810 */ /* 0x000fc80007f7e0ff */ /*0340*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x0001e2000c101904 */ /*0350*/ IMAD.X R11, RZ, RZ, R5, P3 ; /* 0x000000ffff0b7224 */ /* 0x000fe400018e0605 */ /*0360*/ @!P1 BRA 0x230 ; /* 0xfffffec000009947 */ /* 0x000fea000383ffff */ /*0370*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0380*/ BRA 0x380; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6updatePfS_iii .globl _Z6updatePfS_iii .p2align 8 .type _Z6updatePfS_iii,@function _Z6updatePfS_iii: s_load_b64 s[4:5], s[0:1], 0x14 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v1, s5, s15, v3 v_add_nc_u32_e32 v2, 1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_lt_i32_e32 vcc_lo, s5, v2 v_cmp_ge_i32_e64 s2, s4, v2 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_8 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_3 v_mul_lo_u32 v2, v1, s4 s_load_b64 s[6:7], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, s5, -1, v2 v_ashrrev_i32_e32 v5, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s6, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo global_load_b32 v2, v[4:5], off v_lshlrev_b32_e32 v4, 2, v3 s_waitcnt vmcnt(0) ds_store_b32 v4, v2 offset:2048 .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 s_load_b32 s4, s[0:1], 0x10 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e32 vcc_lo, s4, v0 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_8 s_load_b64 s[2:3], s[0:1], 0x0 v_mul_lo_u32 v1, v1, s4 v_cmp_eq_u32_e32 vcc_lo, 0, v3 v_lshlrev_b32_e32 v2, 2, v0 v_lshl_add_u32 v3, v3, 2, 0x800 s_add_i32 s5, s5, -1 s_mov_b32 s1, 0 s_mul_i32 s5, s5, s4 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_6 .p2align 6 .LBB0_5: s_or_b32 exec_lo, exec_lo, s6 v_add_nc_u32_e32 v4, v1, v0 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv v_add_nc_u32_e32 v9, 0x201, v0 v_ashrrev_i32_e32 v5, 31, v4 v_add_nc_u32_e32 v0, 0x200, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, s0, s2, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 v_cmp_lt_i32_e64 s0, s4, v9 global_load_b32 v6, v[4:5], off ds_load_b32 v7, v2 ds_load_b32 v8, v3 s_or_b32 s1, s0, s1 s_waitcnt vmcnt(0) lgkmcnt(0) v_fma_f32 v6, -v7, v8, v6 global_store_b32 v[4:5], v6, off s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execz .LBB0_8 .LBB0_6: s_and_saveexec_b32 s6, vcc_lo s_cbranch_execz .LBB0_5 v_add_nc_u32_e32 v4, s5, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v5, 31, v4 v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v4, s0, s2, v4 v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v2, v4 s_branch .LBB0_5 .LBB0_8: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6updatePfS_iii .amdhsa_group_segment_fixed_size 2052 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6updatePfS_iii, .Lfunc_end0-_Z6updatePfS_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 2052 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6updatePfS_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6updatePfS_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000d72a7_00000000-6_update.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z6updatePfS_iiiPfS_iii .type _Z30__device_stub__Z6updatePfS_iiiPfS_iii, @function _Z30__device_stub__Z6updatePfS_iiiPfS_iii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6updatePfS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z6updatePfS_iiiPfS_iii, .-_Z30__device_stub__Z6updatePfS_iiiPfS_iii .globl _Z6updatePfS_iii .type _Z6updatePfS_iii, @function _Z6updatePfS_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z30__device_stub__Z6updatePfS_iiiPfS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z6updatePfS_iii, .-_Z6updatePfS_iii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6updatePfS_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6updatePfS_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "update.hip" .globl _Z21__device_stub__updatePfS_iii # -- Begin function _Z21__device_stub__updatePfS_iii .p2align 4, 0x90 .type _Z21__device_stub__updatePfS_iii,@function _Z21__device_stub__updatePfS_iii: # @_Z21__device_stub__updatePfS_iii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6updatePfS_iii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z21__device_stub__updatePfS_iii, .Lfunc_end0-_Z21__device_stub__updatePfS_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6updatePfS_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6updatePfS_iii,@object # @_Z6updatePfS_iii .section .rodata,"a",@progbits .globl _Z6updatePfS_iii .p2align 3, 0x0 _Z6updatePfS_iii: .quad _Z21__device_stub__updatePfS_iii .size _Z6updatePfS_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6updatePfS_iii" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__updatePfS_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6updatePfS_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
code for sm_80 Function : _Z6kernelPfPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */ /* 0x000e220000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0030*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */ /* 0x000fe40000000800 */ /*0040*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x001fe2000f8e023f */ /*0050*/ IADD3 R0, -R0, RZ, RZ ; /* 0x000000ff00007210 */ /* 0x002fca0007ffe1ff */ /*0060*/ ISETP.NE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf05270 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ HFMA2.MMA R6, -RZ, RZ, 0.2249755859375, 0.2249755859375 ; /* 0x33333333ff067435 */ /* 0x000fe200000001ff */ /*0090*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4141999a ; /* 0x4141999aff097424 */ /* 0x000fe200078e00ff */ /*00a0*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */ /* 0x000fe20000000f00 */ /*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */ /* 0x000fe200078e00ff */ /*00c0*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fe20000000f00 */ /*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fe200078e00ff */ /*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x40283333 ; /* 0x40283333ff077424 */ /* 0x000fe200078e00ff */ /*0100*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe8000c101904 */ /*0110*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */ /* 0x000fe2000c101b04 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
.file "tmpxft_000ab896_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z6kernelPfPdPfPd .type _Z27__device_stub__Z6kernelPfPdPfPd, @function _Z27__device_stub__Z6kernelPfPdPfPd: .LFB2081: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPfPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2081: .size _Z27__device_stub__Z6kernelPfPdPfPd, .-_Z27__device_stub__Z6kernelPfPdPfPd .globl _Z6kernelPfPd .type _Z6kernelPfPd, @function _Z6kernelPfPd: .LFB2082: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z6kernelPfPdPfPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2082: .size _Z6kernelPfPd, .-_Z6kernelPfPd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6kernelPfPd" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPfPd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPfPd .globl _Z6kernelPfPd .p2align 8 .type _Z6kernelPfPd,@function _Z6kernelPfPd: s_load_b32 s2, s[0:1], 0x1c v_sub_nc_u32_e32 v0, 0, v0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s15, s15, s2 s_mov_b32 s2, exec_lo v_cmpx_eq_u32_e64 s15, v0 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0x4141999a v_mov_b32_e32 v0, 0x33333333 v_mov_b32_e32 v1, 0x40283333 s_waitcnt lgkmcnt(0) s_clause 0x1 global_store_b32 v2, v3, s[0:1] global_store_b64 v2, v[0:1], s[2:3] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPfPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text 
.Lfunc_end0: .size _Z6kernelPfPd, .Lfunc_end0-_Z6kernelPfPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPfPd .private_segment_fixed_size: 0 .sgpr_count: 16 .sgpr_spill_count: 0 .symbol: _Z6kernelPfPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel(float *F, double *D) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *F = 12.1; *D = 12.1; } }
.text .file "kernel.hip" .globl _Z21__device_stub__kernelPfPd # -- Begin function _Z21__device_stub__kernelPfPd .p2align 4, 0x90 .type _Z21__device_stub__kernelPfPd,@function _Z21__device_stub__kernelPfPd: # @_Z21__device_stub__kernelPfPd .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z6kernelPfPd, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z21__device_stub__kernelPfPd, .Lfunc_end0-_Z21__device_stub__kernelPfPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPfPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary 
movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPfPd,@object # @_Z6kernelPfPd .section .rodata,"a",@progbits .globl _Z6kernelPfPd .p2align 3, 0x0 _Z6kernelPfPd: .quad _Z21__device_stub__kernelPfPd .size _Z6kernelPfPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6kernelPfPd" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPfPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPfPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelPfPd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */ /* 0x000e220000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0030*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */ /* 0x000fe40000000800 */ /*0040*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x001fe2000f8e023f */ /*0050*/ IADD3 R0, -R0, RZ, RZ ; /* 0x000000ff00007210 */ /* 0x002fca0007ffe1ff */ /*0060*/ ISETP.NE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf05270 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ HFMA2.MMA R6, -RZ, RZ, 0.2249755859375, 0.2249755859375 ; /* 0x33333333ff067435 */ /* 0x000fe200000001ff */ /*0090*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4141999a ; /* 0x4141999aff097424 */ /* 0x000fe200078e00ff */ /*00a0*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */ /* 0x000fe20000000f00 */ /*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */ /* 0x000fe200078e00ff */ /*00c0*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fe20000000f00 */ /*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fe200078e00ff */ /*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x40283333 ; /* 0x40283333ff077424 */ /* 0x000fe200078e00ff */ /*0100*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe8000c101904 */ /*0110*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */ /* 0x000fe2000c101b04 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPfPd .globl _Z6kernelPfPd .p2align 8 .type _Z6kernelPfPd,@function _Z6kernelPfPd: s_load_b32 s2, s[0:1], 0x1c v_sub_nc_u32_e32 v0, 0, v0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s15, s15, s2 s_mov_b32 s2, exec_lo v_cmpx_eq_u32_e64 s15, v0 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0x4141999a v_mov_b32_e32 v0, 0x33333333 v_mov_b32_e32 v1, 0x40283333 s_waitcnt lgkmcnt(0) s_clause 0x1 global_store_b32 v2, v3, s[0:1] global_store_b64 v2, v[0:1], s[2:3] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPfPd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text 
.Lfunc_end0: .size _Z6kernelPfPd, .Lfunc_end0-_Z6kernelPfPd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPfPd .private_segment_fixed_size: 0 .sgpr_count: 16 .sgpr_spill_count: 0 .symbol: _Z6kernelPfPd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000ab896_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z6kernelPfPdPfPd .type _Z27__device_stub__Z6kernelPfPdPfPd, @function _Z27__device_stub__Z6kernelPfPdPfPd: .LFB2081: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPfPd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2081: .size _Z27__device_stub__Z6kernelPfPdPfPd, .-_Z27__device_stub__Z6kernelPfPdPfPd .globl _Z6kernelPfPd .type _Z6kernelPfPd, @function _Z6kernelPfPd: .LFB2082: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z6kernelPfPdPfPd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2082: .size _Z6kernelPfPd, .-_Z6kernelPfPd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6kernelPfPd" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPfPd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" .globl _Z21__device_stub__kernelPfPd # -- Begin function _Z21__device_stub__kernelPfPd .p2align 4, 0x90 .type _Z21__device_stub__kernelPfPd,@function _Z21__device_stub__kernelPfPd: # @_Z21__device_stub__kernelPfPd .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z6kernelPfPd, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z21__device_stub__kernelPfPd, .Lfunc_end0-_Z21__device_stub__kernelPfPd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPfPd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary 
movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPfPd,@object # @_Z6kernelPfPd .section .rodata,"a",@progbits .globl _Z6kernelPfPd .p2align 3, 0x0 _Z6kernelPfPd: .quad _Z21__device_stub__kernelPfPd .size _Z6kernelPfPd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6kernelPfPd" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPfPd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPfPd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(cudaMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(cudaMalloc(y_ptr, size)); CUDA_CHECK_RETURN(cudaMemcpy(*x_ptr, x, size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(*y_ptr, y, size, cudaMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { cudaMemcpy(v, v_ptr, sizeof(float) * n, cudaMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { cudaFree(x_ptr); cudaFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
code for sm_80 Function : _Z26a_x_mais_y_device_coalesceiPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R11 ; /* 0x0000000004047a24 */ /* 0x001fca00078e020b */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R4, R7, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R4, R7, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fe400078e0207 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000ee2000c1e1900 */ /*00c0*/ SHF.L.U32 R0, R11, 0x2, RZ ; /* 0x000000020b007819 */ /* 0x000fca00000006ff */ /*00d0*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x000fe400078e0200 */ /*00e0*/ I2F R0, c[0x0][0x160] ; /* 0x0000580000007b06 */ /* 0x000e220000201400 */ /*00f0*/ STS [R11.X4], R2 ; /* 0x000000020b007388 */ /* 0x004fe80000004800 */ /*0100*/ STS [R7], R6 ; /* 0x0000000607007388 */ /* 0x008fe80000000800 */ /*0110*/ LDS R9, [R11.X4] ; /* 0x000000000b097984 */ /* 0x000e240000004800 */ /*0120*/ FFMA R9, R9, R0, R6 ; /* 0x0000000009097223 */ /* 0x001fca0000000006 */ /*0130*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 
0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z17a_x_mais_y_deviceiPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x000fc800078e0205 */ /*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fe400078e0205 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ I2F R7, c[0x0][0x160] ; /* 0x0000580000077b06 */ /* 0x000ea40000201400 */ /*00d0*/ FFMA R7, R2, R7, R0 ; /* 
0x0000000702077223 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(cudaMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(cudaMalloc(y_ptr, size)); CUDA_CHECK_RETURN(cudaMemcpy(*x_ptr, x, size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(*y_ptr, y, size, cudaMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { cudaMemcpy(v, v_ptr, sizeof(float) * n, cudaMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { cudaFree(x_ptr); cudaFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
.file "tmpxft_0016d499_00000000-6_AXMaisY.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%f " .LC1: .string "%f" .text .globl _Z10fprint_matP8_IO_FILEPfi .type _Z10fprint_matP8_IO_FILEPfi, @function _Z10fprint_matP8_IO_FILEPfi: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbp movq %rsi, %r15 movl %edx, %r14d cmpl $1, %edx jle .L4 movq %rsi, %rbx leal -2(%rdx), %eax leaq 4(%rsi,%rax,4), %r13 leaq .LC0(%rip), %r12 .L5: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rdx movl $2, %esi movq %rbp, %rdi movl $1, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %r13, %rbx jne .L5 .L4: movslq %r14d, %r14 pxor %xmm0, %xmm0 cvtss2sd -4(%r15,%r14,4), %xmm0 leaq .LC1(%rip), %rdx movl $2, %esi movq %rbp, %rdi movl $1, %eax call __fprintf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z10fprint_matP8_IO_FILEPfi, .-_Z10fprint_matP8_IO_FILEPfi .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string 
"/home/ubuntu/Datasets/stackv2/train-structured/wakim/prog-gpu-cuda/master/AXMaisY/src/AXMaisY.cu" .align 8 .LC3: .string "Error %s at line %d in file %s\n" .text .globl _Z15alloc_on_devicePfS_PPvS1_i .type _Z15alloc_on_devicePfS_PPvS1_i, @function _Z15alloc_on_devicePfS_PPvS1_i: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %rbp movq %rcx, %r12 movslq %r8d, %rbx salq $2, %rbx movq %rbx, %rsi movq %rdx, %rdi call cudaMalloc@PLT testl %eax, %eax jne .L14 movq %rbx, %rsi movq %r12, %rdi call cudaMalloc@PLT testl %eax, %eax jne .L15 movq 0(%rbp), %rdi movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L16 movq (%r12), %rdi movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L17 popq %rbx .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $149, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L15: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $150, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L16: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $151, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L17: movl %eax, %edi call cudaGetErrorString@PLT 
movq %rax, %rcx leaq .LC2(%rip), %r9 movl $152, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2059: .size _Z15alloc_on_devicePfS_PPvS1_i, .-_Z15alloc_on_devicePfS_PPvS1_i .globl _Z16copy_from_devicePvPfi .type _Z16copy_from_devicePvPfi, @function _Z16copy_from_devicePvPfi: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq %rdi, %rax movq %rsi, %rdi movslq %edx, %rdx salq $2, %rdx movl $2, %ecx movq %rax, %rsi call cudaMemcpy@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z16copy_from_devicePvPfi, .-_Z16copy_from_devicePvPfi .globl _Z14free_on_devicePvS_ .type _Z14free_on_devicePvS_, @function _Z14free_on_devicePvS_: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movq %rsi, %rbx call cudaFree@PLT movq %rbx, %rdi call cudaFree@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z14free_on_devicePvS_, .-_Z14free_on_devicePvS_ .globl _Z15a_x_mais_y_hostiPfS_i .type _Z15a_x_mais_y_hostiPfS_i, @function _Z15a_x_mais_y_hostiPfS_i: .LFB2062: .cfi_startproc endbr64 testl %ecx, %ecx jle .L22 movslq %ecx, %rcx salq $2, %rcx movl $0, %eax pxor %xmm1, %xmm1 cvtsi2ssl %edi, %xmm1 .L24: movaps %xmm1, %xmm0 mulss (%rsi,%rax), %xmm0 addss (%rdx,%rax), %xmm0 movss %xmm0, (%rdx,%rax) addq $4, %rax cmpq %rcx, %rax jne .L24 .L22: ret .cfi_endproc .LFE2062: .size _Z15a_x_mais_y_hostiPfS_i, .-_Z15a_x_mais_y_hostiPfS_i .globl _Z4initi .type _Z4initi, @function _Z4initi: .LFB2063: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl %edi, %ebp movslq %edi, %rbx leaq 0(,%rbx,4), %rdi call malloc@PLT testl %ebp, %ebp jle .L26 movl $0, %edx .L28: pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 movss %xmm0, (%rax,%rdx,4) addq $1, %rdx cmpq %rdx, %rbx jne .L28 .L26: addq $8, %rsp 
.cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _Z4initi, .-_Z4initi .globl _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i .type _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i, @function _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i: .LFB2088: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 24(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L35 .L31: movq 136(%rsp), %rax subq %fs:40, %rax jne .L36 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17a_x_mais_y_deviceiPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L31 .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i, .-_Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i .globl _Z17a_x_mais_y_deviceiPfS_i .type _Z17a_x_mais_y_deviceiPfS_i, @function _Z17a_x_mais_y_deviceiPfS_i: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z17a_x_mais_y_deviceiPfS_i, .-_Z17a_x_mais_y_deviceiPfS_i .globl _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i .type 
_Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i, @function _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i: .LFB2090: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 24(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L43 .L39: movq 136(%rsp), %rax subq %fs:40, %rax jne .L44 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L43: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26a_x_mais_y_device_coalesceiPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L39 .L44: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i, .-_Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .type _Z26a_x_mais_y_device_coalesceiPfS_i, @function _Z26a_x_mais_y_device_coalesceiPfS_i: .LFB2091: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _Z26a_x_mais_y_device_coalesceiPfS_i, .-_Z26a_x_mais_y_device_coalesceiPfS_i .section .rodata.str1.8 .align 8 .LC4: .string "Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\nalgoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc 
(Cuda coalescente)\n" .section .rodata.str1.1 .LC5: .string "debug" .LC6: .string "w" .LC7: .string "y_s.txt" .LC8: .string "x_s.txt" .LC9: .string "axy_s.txt" .LC10: .string "y_c.txt" .LC11: .string "x_c.txt" .LC12: .string "axy_c.txt" .LC13: .string "cc" .LC14: .string "x_cc.txt" .LC15: .string "y_cc.txt" .LC16: .string "axy_cc.txt" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %rsi, %rbx movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $3, %edi jle .L71 movl %edi, %ebp movq 8(%rsi), %r14 movq 16(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, 24(%rsp) movl %eax, %r12d movq 24(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, 16(%rsp) cmpl $4, %ebp jle .L50 movq 32(%rbx), %rdi leaq .LC5(%rip), %rsi call strcmp@PLT testl %eax, %eax sete %r15b movzbl %r15b, %r15d movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl %r12d, %edi call _Z4initi movq %rax, %r13 movl %r12d, %edi call _Z4initi movq %rax, %rbp movq 32(%rbx), %rdi movl $0, %esi call strtod@PLT pxor %xmm1, %xmm1 cvtsd2ss %xmm0, %xmm1 movss %xmm1, 12(%rsp) jmp .L65 .L71: movq (%rsi), %rdx leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $-1, %eax jmp .L47 .L77: testl %r15d, %r15d jne .L72 .L52: cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq %rbp, %rdx movq %r13, %rsi call _Z15a_x_mais_y_hostiPfS_i jmp .L58 .L72: leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC7(%rip), %r14 movq %r14, %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi movq %r14, %rdi 
call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC9(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl %r15d, %ebx .L64: pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 leaq .LC1(%rip), %rdx movl $2, %esi movq 32(%rsp), %rdi movl $1, %eax call __fprintf_chk@PLT movl %r12d, %edx movq %r13, %rsi movq 40(%rsp), %rdi call _Z10fprint_matP8_IO_FILEPfi movl %r12d, %edx movq %rbp, %rsi movq 48(%rsp), %rdi call _Z10fprint_matP8_IO_FILEPfi cmpl $1, %ebx je .L52 .L57: movq 16(%rsp), %rcx movq 24(%rsp), %rsi leal -1(%rcx,%rsi), %eax cltd idivl %ecx movl %eax, %r14d leaq 72(%rsp), %rcx leaq 64(%rsp), %rdx movl %esi, %r8d movq %rbp, %rsi movq %r13, %rdi call _Z15alloc_on_devicePfS_PPvS1_i cmpl $2, %ebx je .L73 cmpl $4, %ebx je .L74 .L61: movl %r12d, %edx movq %rbp, %rsi movq 72(%rsp), %rdi call _Z16copy_from_devicePvPfi .L58: testl %r15d, %r15d jne .L75 .L63: movq %r13, %rdi call free@PLT movq %rbp, %rdi call free@PLT movl $0, %eax .L47: movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L76 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L78: .cfi_restore_state movl $2, %ebx testl %r15d, %r15d je .L57 leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC10(%rip), %r14 movq %r14, %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC11(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi movq %r14, %rdi call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC12(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl $2, %ebx jmp .L64 .L69: movl $-1, %ebx jmp .L64 .L73: movl 16(%rsp), %eax movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %r14d, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L61 
cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq 72(%rsp), %rdx movq 64(%rsp), %rsi call _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i jmp .L61 .L74: movq 16(%rsp), %rax movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %r14d, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl %eax, %r8d sall $3, %r8d movl $0, %r9d movslq %r8d, %r8 movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L61 cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq 72(%rsp), %rdx movq 64(%rsp), %rsi call _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i jmp .L61 .L75: movl %r12d, %edx movq %rbp, %rsi movq 56(%rsp), %rbx movq %rbx, %rdi call _Z10fprint_matP8_IO_FILEPfi movq 40(%rsp), %rdi call fclose@PLT movq 48(%rsp), %rdi call fclose@PLT movq %rbx, %rdi call fclose@PLT movq 32(%rsp), %rdi call fclose@PLT jmp .L63 .L56: testl %r15d, %r15d jne .L69 leaq 72(%rsp), %rcx leaq 64(%rsp), %rdx movl 24(%rsp), %r8d movq %rbp, %rsi movq %r13, %rdi call _Z15alloc_on_devicePfS_PPvS1_i jmp .L61 .L50: movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl %r12d, %edi call _Z4initi movq %rax, %r13 movl %r12d, %edi call _Z4initi movq %rax, %rbp call rand@PLT cltd idivl %r12d pxor %xmm2, %xmm2 cvtsi2ssl %edx, %xmm2 movss %xmm2, 12(%rsp) movl $0, %r15d .L65: movzbl (%r14), %ebx cmpb $115, %bl je .L77 movq %r14, %rdi call strlen@PLT cmpb $99, %bl jne .L54 cmpq $1, %rax je .L78 .L54: movl $2, %edx leaq .LC13(%rip), %rsi movq %r14, %rdi call strncmp@PLT testl %eax, %eax jne .L56 movl $4, %ebx testl %r15d, %r15d je .L57 leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC10(%rip), %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC14(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi leaq .LC15(%rip), %rdi call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC16(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl $4, %ebx jmp .L64 .L76: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: 
.size main, .-main .section .rodata.str1.8 .align 8 .LC17: .string "_Z26a_x_mais_y_device_coalesceiPfS_i" .section .rodata.str1.1 .LC18: .string "_Z17a_x_mais_y_deviceiPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2093: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z26a_x_mais_y_device_coalesceiPfS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z17a_x_mais_y_deviceiPfS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2093: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(cudaMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(cudaMalloc(y_ptr, size)); CUDA_CHECK_RETURN(cudaMemcpy(*x_ptr, x, size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(*y_ptr, y, size, cudaMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { cudaMemcpy(v, v_ptr, sizeof(float) * n, cudaMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { cudaFree(x_ptr); cudaFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(hipMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(hipMalloc(y_ptr, size)); CUDA_CHECK_RETURN(hipMemcpy(*x_ptr, x, size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(*y_ptr, y, size, hipMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { hipMemcpy(v, v_ptr, sizeof(float) * n, hipMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { hipFree(x_ptr); hipFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(hipMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(hipMalloc(y_ptr, size)); CUDA_CHECK_RETURN(hipMemcpy(*x_ptr, x, size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(*y_ptr, y, size, hipMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { hipMemcpy(v, v_ptr, sizeof(float) * n, hipMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { hipFree(x_ptr); hipFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17a_x_mais_y_deviceiPfS_i .globl _Z17a_x_mais_y_deviceiPfS_i .p2align 8 .type _Z17a_x_mais_y_deviceiPfS_i,@function _Z17a_x_mais_y_deviceiPfS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 s_load_b32 s0, s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo v_cvt_f32_i32_e32 v4, s0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[0:1], off s_waitcnt vmcnt(0) v_fmac_f32_e32 v3, v2, v4 global_store_b32 v[0:1], v3, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17a_x_mais_y_deviceiPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 
.amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17a_x_mais_y_deviceiPfS_i, .Lfunc_end0-_Z17a_x_mais_y_deviceiPfS_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z26a_x_mais_y_device_coalesceiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .p2align 8 .type _Z26a_x_mais_y_device_coalesceiPfS_i,@function _Z26a_x_mais_y_device_coalesceiPfS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_cmp_gt_i32_e32 vcc_lo, s3, v1 s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB1_2 s_load_b128 s[4:7], s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b32_e32 v0, 2, v0 s_lshl_b32 s2, s2, 2 s_load_b32 s0, s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s6, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo global_load_b32 v3, v[3:4], off global_load_b32 v4, v[1:2], off v_add_nc_u32_e32 v5, 0, v0 v_add3_u32 v0, 0, s2, v0 s_waitcnt vmcnt(1) ds_store_b32 v5, v3 s_waitcnt vmcnt(0) ds_store_b32 v0, v4 ds_load_b32 v0, v5 v_cvt_f32_i32_e32 v3, s0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_fmac_f32_e32 v4, v0, v3 global_store_b32 v[1:2], v4, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits 
.p2align 6, 0x0 .amdhsa_kernel _Z26a_x_mais_y_device_coalesceiPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z26a_x_mais_y_device_coalesceiPfS_i, .Lfunc_end1-_Z26a_x_mais_y_device_coalesceiPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global 
.offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17a_x_mais_y_deviceiPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17a_x_mais_y_deviceiPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: 
hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims - .offset: 152 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26a_x_mais_y_device_coalesceiPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26a_x_mais_y_device_coalesceiPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(hipMalloc(x_ptr, size)); 
CUDA_CHECK_RETURN(hipMalloc(y_ptr, size)); CUDA_CHECK_RETURN(hipMemcpy(*x_ptr, x, size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(*y_ptr, y, size, hipMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { hipMemcpy(v, v_ptr, sizeof(float) * n, hipMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { hipFree(x_ptr); hipFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
.text .file "AXMaisY.hip" .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $232, %rsp .cfi_def_cfa_offset 288 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %r13 cmpl $3, %edi jg .LBB0_2 # %bb.1: movq (%r13), %rsi movl $.L.str, %edi xorl %eax, %eax callq printf movl $-1, %eax jmp .LBB0_56 .LBB0_2: movl %edi, %ebp movq 8(%r13), %r12 movq 16(%r13), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 24(%r13), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 32(%rsp) # 8-byte Spill movb $1, 7(%rsp) # 1-byte Folded Spill cmpl $4, %ebp je .LBB0_4 # %bb.3: movq 32(%r13), %rdi movl $.L.str.2, %esi callq strcmp testl %eax, %eax setne 7(%rsp) # 1-byte Folded Spill .LBB0_4: xorl %edi, %edi callq time movl %eax, %edi callq srand movslq %r14d, %r15 shlq $2, %r15 movq %r15, %rdi callq malloc movq %rax, %rbx testl %r14d, %r14d jle .LBB0_7 # %bb.5: # %.lr.ph.preheader.i movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_6: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%rbx,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_6 .LBB0_7: # %_Z4initi.exit movq %r15, 224(%rsp) # 8-byte Spill movq %r15, %rdi callq malloc movq %rax, %r15 testl %r14d, %r14d jle .LBB0_10 # %bb.8: # %.lr.ph.preheader.i86 movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_9: # %.lr.ph.i88 # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%r15,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_9 .LBB0_10: # %_Z4initi.exit92 movq 32(%rsp), %rcx # 8-byte Reload leal (%r14,%rcx), %eax decl %eax cltd idivl %ecx 
movl %eax, 28(%rsp) # 4-byte Spill cmpl $4, %ebp jne .LBB0_12 # %bb.11: callq rand cltd idivl %r14d xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 jmp .LBB0_13 .LBB0_12: movq 32(%r13), %rdi xorl %esi, %esi callq strtod cvtsd2ss %xmm0, %xmm0 .LBB0_13: movaps %xmm0, 112(%rsp) # 16-byte Spill movzbl (%r12), %ebp cmpb $115, %bpl movb %bpl, 15(%rsp) # 1-byte Spill jne .LBB0_16 # %bb.14: movzbl 7(%rsp), %r12d # 1-byte Folded Reload testb %r12b, %r12b je .LBB0_23 # %bb.15: movl $0, 8(%rsp) # 4-byte Folded Spill # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_30 .LBB0_16: movq %r12, %rdi callq strlen cmpb $99, %bpl jne .LBB0_21 # %bb.17: cmpq $1, %rax jne .LBB0_21 # %bb.18: movl $0, 16(%rsp) # 4-byte Folded Spill movzbl 7(%rsp), %r12d # 1-byte Folded Reload movb $1, %al testb %r12b, %r12b je .LBB0_24 # %bb.19: movl %eax, 8(%rsp) # 4-byte Spill jmp .LBB0_20 .LBB0_21: movl $.L.str.12, %esi movl $2, %edx movq %r12, %rdi callq strncmp testl %eax, %eax je .LBB0_25 # %bb.22: movl $0, 8(%rsp) # 4-byte Folded Spill # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_29 .LBB0_23: movl $0, 8(%rsp) # 4-byte Folded Spill movl $.L.str.7, %ebp movl $.L.str.6, %r12d movl $.L.str.4, %edi movl $.L.str.4, %r13d movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_28 .LBB0_24: movl %eax, 8(%rsp) # 4-byte Spill movl $.L.str.11, %ebp movl $.L.str.10, %r12d movl $.L.str.9, %edi movl $.L.str.9, %r13d jmp .LBB0_28 .LBB0_25: movb $1, %al movl %eax, 16(%rsp) # 4-byte Spill movzbl 7(%rsp), %r12d # 1-byte Folded Reload testb %r12b, %r12b je .LBB0_27 # %bb.26: movl $0, 8(%rsp) # 4-byte Folded Spill .LBB0_20: # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax jmp .LBB0_30 .LBB0_27: movl $0, 
8(%rsp) # 4-byte Folded Spill movl $.L.str.15, %ebp movl $.L.str.14, %r13d movl $.L.str.13, %r12d movl $.L.str.9, %edi .LBB0_28: # %.sink.split movl $.L.str.5, %esi callq fopen movq %rax, 144(%rsp) # 8-byte Spill movl $.L.str.5, %esi movq %r12, %rdi callq fopen movq %rax, 136(%rsp) # 8-byte Spill movl $.L.str.5, %esi movq %r13, %rdi callq fopen movq %rax, %r13 movl $.L.str.5, %esi movq %rbp, %rdi callq fopen movq %rax, %rbp .LBB0_29: movzbl 7(%rsp), %r12d # 1-byte Folded Reload .LBB0_30: testb %r12b, %r12b movq %r13, 160(%rsp) # 8-byte Spill jne .LBB0_38 # %bb.31: movq %rbp, 216(%rsp) # 8-byte Spill movaps 112(%rsp), %xmm0 # 16-byte Reload cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq 144(%rsp), %rdi # 8-byte Reload movb $1, %al callq fprintf leal -1(%r14), %r13d movq %r14, 152(%rsp) # 8-byte Spill cmpl $2, %r14d movq 136(%rsp), %rbp # 8-byte Reload jl .LBB0_34 # %bb.32: # %.lr.ph.preheader.i93 movl %r13d, %r14d xorl %r12d, %r12d .p2align 4, 0x90 .LBB0_33: # %.lr.ph.i95 # =>This Inner Loop Header: Depth=1 movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r12 cmpq %r12, %r14 jne .LBB0_33 .LBB0_34: # %_Z10fprint_matP8_IO_FILEPfi.exit movslq %r13d, %r14 movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbp, %rdi movb $1, %al callq fprintf cmpl $2, 152(%rsp) # 4-byte Folded Reload movq 160(%rsp), %rbp # 8-byte Reload jl .LBB0_37 # %bb.35: # %.lr.ph.preheader.i99 movl %r13d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_36: # %.lr.ph.i101 # =>This Inner Loop Header: Depth=1 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r13 cmpq %r13, %r12 jne .LBB0_36 .LBB0_37: # %_Z10fprint_matP8_IO_FILEPfi.exit105 movss (%r15,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq 
%rbp, %rdi movb $1, %al callq fprintf movq 152(%rsp), %r14 # 8-byte Reload movzbl 7(%rsp), %r12d # 1-byte Folded Reload movq 216(%rsp), %rbp # 8-byte Reload .LBB0_38: cmpb $115, 15(%rsp) # 1-byte Folded Reload jne .LBB0_43 # %bb.39: testl %r14d, %r14d jle .LBB0_42 # %bb.40: # %.lr.ph.i106 cvttps2dq 112(%rsp), %xmm0 # 16-byte Folded Reload cvtdq2ps %xmm0, %xmm0 movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_41: # =>This Inner Loop Header: Depth=1 movss (%rbx,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm0, %xmm1 addss (%r15,%rcx,4), %xmm1 movss %xmm1, (%r15,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_41 .LBB0_42: # %_Z15a_x_mais_y_hostiPfS_i.exit testb %r12b, %r12b je .LBB0_51 jmp .LBB0_55 .LBB0_43: leaq 168(%rsp), %rdx leaq 40(%rsp), %rcx movq %rbx, %rdi movq %r15, %rsi movl %r14d, %r8d callq _Z15alloc_on_devicePfS_PPvS1_i cmpb $0, 8(%rsp) # 1-byte Folded Reload je .LBB0_46 # %bb.44: movl 28(%rsp), %edi # 4-byte Reload movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %rdi movl 32(%rsp), %edx # 4-byte Reload orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_50 # %bb.45: cvttss2si 112(%rsp), %eax # 16-byte Folded Reload movq 168(%rsp), %rcx movq 40(%rsp), %rdx movl %eax, 24(%rsp) movq %rcx, 104(%rsp) movq %rdx, 96(%rsp) movl %r14d, 20(%rsp) leaq 24(%rsp), %rax movq %rax, 176(%rsp) leaq 104(%rsp), %rax movq %rax, 184(%rsp) leaq 96(%rsp), %rax movq %rax, 192(%rsp) leaq 20(%rsp), %rax movq %rax, 200(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z17a_x_mais_y_deviceiPfS_i, %edi jmp .LBB0_49 .LBB0_46: cmpb $0, 16(%rsp) # 1-byte Folded Reload je .LBB0_50 # %bb.47: movq 32(%rsp), %rcx # 8-byte Reload leal (,%rcx,8), %eax movslq %eax, %r8 movl 28(%rsp), %edi # 4-byte Reload movabsq 
$4294967296, %rax # imm = 0x100000000 orq %rax, %rdi movl %ecx, %edx orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_50 # %bb.48: cvttss2si 112(%rsp), %eax # 16-byte Folded Reload movq 168(%rsp), %rcx movq 40(%rsp), %rdx movl %eax, 24(%rsp) movq %rcx, 104(%rsp) movq %rdx, 96(%rsp) movl %r14d, 20(%rsp) leaq 24(%rsp), %rax movq %rax, 176(%rsp) leaq 104(%rsp), %rax movq %rax, 184(%rsp) leaq 96(%rsp), %rax movq %rax, 192(%rsp) leaq 20(%rsp), %rax movq %rax, 200(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %edi .LBB0_49: pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB0_50: movq 40(%rsp), %rsi movq %r15, %rdi movq 224(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy testb %r12b, %r12b jne .LBB0_55 .LBB0_51: movq %r14, %rax decl %r14d cmpl $2, %eax jl .LBB0_54 # %bb.52: # %.lr.ph.preheader.i117 movl %r14d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_53: # %.lr.ph.i119 # =>This Inner Loop Header: Depth=1 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r13 cmpq %r13, %r12 jne .LBB0_53 .LBB0_54: # %_Z10fprint_matP8_IO_FILEPfi.exit123 movslq %r14d, %rax movss (%r15,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbp, %rdi movb $1, %al callq fprintf movq 136(%rsp), %rdi # 8-byte Reload callq fclose movq 160(%rsp), %rdi # 8-byte Reload callq fclose movq %rbp, %rdi callq fclose movq 144(%rsp), %rdi # 8-byte Reload callq fclose .LBB0_55: movq %rbx, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax .LBB0_56: addq $232, %rsp 
.cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .globl _Z4initi # -- Begin function _Z4initi .p2align 4, 0x90 .type _Z4initi,@function _Z4initi: # @_Z4initi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl %edi, %ebx movslq %edi, %r14 leaq (,%r14,4), %rdi callq malloc testl %r14d, %r14d jle .LBB1_3 # %bb.1: # %.lr.ph.preheader movl %ebx, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 movss %xmm0, (%rax,%rdx,4) incq %rdx cmpq %rdx, %rcx jne .LBB1_2 .LBB1_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z4initi, .Lfunc_end1-_Z4initi .cfi_endproc # -- End function .globl _Z10fprint_matP8_IO_FILEPfi # -- Begin function _Z10fprint_matP8_IO_FILEPfi .p2align 4, 0x90 .type _Z10fprint_matP8_IO_FILEPfi,@function _Z10fprint_matP8_IO_FILEPfi: # @_Z10fprint_matP8_IO_FILEPfi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 # kill: def $edx killed $edx def $rdx movq %rsi, %r14 movq %rdi, %rbx leal -1(%rdx), %ebp cmpl $2, %edx jl .LBB2_3 # %bb.1: # %.lr.ph.preheader movl %ebp, %r15d xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movss (%r14,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbx, 
%rdi movb $1, %al callq fprintf incq %r12 cmpq %r12, %r15 jne .LBB2_2 .LBB2_3: # %._crit_edge movslq %ebp, %rax movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbx, %rdi movb $1, %al popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fprintf # TAILCALL .Lfunc_end2: .size _Z10fprint_matP8_IO_FILEPfi, .Lfunc_end2-_Z10fprint_matP8_IO_FILEPfi .cfi_endproc # -- End function .globl _Z15a_x_mais_y_hostiPfS_i # -- Begin function _Z15a_x_mais_y_hostiPfS_i .p2align 4, 0x90 .type _Z15a_x_mais_y_hostiPfS_i,@function _Z15a_x_mais_y_hostiPfS_i: # @_Z15a_x_mais_y_hostiPfS_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB3_3 # %bb.1: # %.lr.ph cvtsi2ss %edi, %xmm0 movl %ecx, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_2: # =>This Inner Loop Header: Depth=1 movss (%rsi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm0, %xmm1 addss (%rdx,%rcx,4), %xmm1 movss %xmm1, (%rdx,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB3_2 .LBB3_3: # %._crit_edge retq .Lfunc_end3: .size _Z15a_x_mais_y_hostiPfS_i, .Lfunc_end3-_Z15a_x_mais_y_hostiPfS_i .cfi_endproc # -- End function .globl _Z15alloc_on_devicePfS_PPvS1_i # -- Begin function _Z15alloc_on_devicePfS_PPvS1_i .p2align 4, 0x90 .type _Z15alloc_on_devicePfS_PPvS1_i,@function _Z15alloc_on_devicePfS_PPvS1_i: # @_Z15alloc_on_devicePfS_PPvS1_i .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r15 movq %rdx, %r13 movq %rsi, %rbx movq %rdi, %r12 movslq %r8d, %r14 shlq $2, %r14 movq %rdx, %rdi movq %r14, %rsi callq hipMalloc testl %eax, %eax jne .LBB4_1 # %bb.3: movq %r15, %rdi movq %r14, %rsi callq hipMalloc 
testl %eax, %eax jne .LBB4_4 # %bb.5: movq (%r13), %rdi movq %r12, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB4_6 # %bb.7: movq (%r15), %rdi movq %rbx, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB4_8 # %bb.9: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB4_1: .cfi_def_cfa_offset 48 movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $151, %ecx jmp .LBB4_2 .LBB4_4: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $152, %ecx jmp .LBB4_2 .LBB4_6: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $153, %ecx jmp .LBB4_2 .LBB4_8: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $154, %ecx .LBB4_2: xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end4: .size _Z15alloc_on_devicePfS_PPvS1_i, .Lfunc_end4-_Z15alloc_on_devicePfS_PPvS1_i .cfi_endproc # -- End function .globl _Z32__device_stub__a_x_mais_y_deviceiPfS_i # -- Begin function _Z32__device_stub__a_x_mais_y_deviceiPfS_i .p2align 4, 0x90 .type _Z32__device_stub__a_x_mais_y_deviceiPfS_i,@function _Z32__device_stub__a_x_mais_y_deviceiPfS_i: # @_Z32__device_stub__a_x_mais_y_deviceiPfS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 
16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17a_x_mais_y_deviceiPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _Z32__device_stub__a_x_mais_y_deviceiPfS_i, .Lfunc_end5-_Z32__device_stub__a_x_mais_y_deviceiPfS_i .cfi_endproc # -- End function .globl _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i # -- Begin function _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .p2align 4, 0x90 .type _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i,@function _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i: # @_Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end6: .size _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i, .Lfunc_end6-_Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .cfi_endproc # -- End function .globl _Z16copy_from_devicePvPfi # -- Begin function _Z16copy_from_devicePvPfi .p2align 4, 0x90 .type _Z16copy_from_devicePvPfi,@function _Z16copy_from_devicePvPfi: # @_Z16copy_from_devicePvPfi .cfi_startproc # %bb.0: movq %rdi, %rax movslq %edx, %rdx shlq $2, %rdx movq %rsi, %rdi movq %rax, %rsi movl $2, 
%ecx jmp hipMemcpy # TAILCALL .Lfunc_end7: .size _Z16copy_from_devicePvPfi, .Lfunc_end7-_Z16copy_from_devicePvPfi .cfi_endproc # -- End function .globl _Z14free_on_devicePvS_ # -- Begin function _Z14free_on_devicePvS_ .p2align 4, 0x90 .type _Z14free_on_devicePvS_,@function _Z14free_on_devicePvS_: # @_Z14free_on_devicePvS_ .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movq %rsi, %rbx callq hipFree movq %rbx, %rdi popq %rbx .cfi_def_cfa_offset 8 jmp hipFree # TAILCALL .Lfunc_end8: .size _Z14free_on_devicePvS_, .Lfunc_end8-_Z14free_on_devicePvS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB9_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB9_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17a_x_mais_y_deviceiPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end9: .size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB10_2 # %bb.1: pushq %rax 
.cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB10_2: retq .Lfunc_end10: .size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\nalgoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n" .size .L.str, 159 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "debug" .size .L.str.2, 6 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "y_s.txt" .size .L.str.4, 8 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "w" .size .L.str.5, 2 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "x_s.txt" .size .L.str.6, 8 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "axy_s.txt" .size .L.str.7, 10 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "y_c.txt" .size .L.str.9, 8 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "x_c.txt" .size .L.str.10, 8 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "axy_c.txt" .size .L.str.11, 10 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "cc" .size .L.str.12, 3 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz "x_cc.txt" .size .L.str.13, 9 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "y_cc.txt" .size .L.str.14, 9 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "axy_cc.txt" .size .L.str.15, 11 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "%f" .size .L.str.16, 3 .type _Z17a_x_mais_y_deviceiPfS_i,@object # @_Z17a_x_mais_y_deviceiPfS_i .section .rodata,"a",@progbits .globl _Z17a_x_mais_y_deviceiPfS_i .p2align 3, 0x0 _Z17a_x_mais_y_deviceiPfS_i: .quad _Z32__device_stub__a_x_mais_y_deviceiPfS_i .size _Z17a_x_mais_y_deviceiPfS_i, 8 .type _Z26a_x_mais_y_device_coalesceiPfS_i,@object # @_Z26a_x_mais_y_device_coalesceiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .p2align 3, 0x0 
_Z26a_x_mais_y_device_coalesceiPfS_i: .quad _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .size _Z26a_x_mais_y_device_coalesceiPfS_i, 8 .type .L.str.17,@object # @.str.17 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.17: .asciz "%f " .size .L.str.17, 4 .type .L.str.18,@object # @.str.18 .L.str.18: .asciz "Error %s at line %d in file %s\n" .size .L.str.18, 32 .type .L.str.19,@object # @.str.19 .L.str.19: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wakim/prog-gpu-cuda/master/AXMaisY/src/AXMaisY.hip" .size .L.str.19, 108 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z17a_x_mais_y_deviceiPfS_i" .size .L__unnamed_1, 28 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z26a_x_mais_y_device_coalesceiPfS_i" .size .L__unnamed_2, 37 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__a_x_mais_y_deviceiPfS_i .addrsig_sym _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17a_x_mais_y_deviceiPfS_i .addrsig_sym _Z26a_x_mais_y_device_coalesceiPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z26a_x_mais_y_device_coalesceiPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R11 ; /* 0x0000000004047a24 */ /* 0x001fca00078e020b */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R4, R7, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R4, R7, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fe400078e0207 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000ee2000c1e1900 */ /*00c0*/ SHF.L.U32 R0, R11, 0x2, RZ ; /* 0x000000020b007819 */ /* 0x000fca00000006ff */ /*00d0*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x000fe400078e0200 */ /*00e0*/ I2F R0, c[0x0][0x160] ; /* 0x0000580000007b06 */ /* 0x000e220000201400 */ /*00f0*/ STS [R11.X4], R2 ; /* 0x000000020b007388 */ /* 0x004fe80000004800 */ /*0100*/ STS [R7], R6 ; /* 0x0000000607007388 */ /* 0x008fe80000000800 */ /*0110*/ LDS R9, [R11.X4] ; /* 0x000000000b097984 */ /* 0x000e240000004800 */ /*0120*/ FFMA R9, R9, R0, R6 ; /* 0x0000000009097223 */ /* 0x001fca0000000006 */ /*0130*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 
0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z17a_x_mais_y_deviceiPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x000fc800078e0205 */ /*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fe400078e0205 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ I2F R7, c[0x0][0x160] ; /* 0x0000580000077b06 */ /* 0x000ea40000201400 */ /*00d0*/ FFMA R7, R2, R7, R0 ; /* 
0x0000000702077223 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17a_x_mais_y_deviceiPfS_i .globl _Z17a_x_mais_y_deviceiPfS_i .p2align 8 .type _Z17a_x_mais_y_deviceiPfS_i,@function _Z17a_x_mais_y_deviceiPfS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 s_load_b32 s0, s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo v_cvt_f32_i32_e32 v4, s0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[0:1], off s_waitcnt vmcnt(0) v_fmac_f32_e32 v3, v2, v4 global_store_b32 v[0:1], v3, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17a_x_mais_y_deviceiPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 
.amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17a_x_mais_y_deviceiPfS_i, .Lfunc_end0-_Z17a_x_mais_y_deviceiPfS_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z26a_x_mais_y_device_coalesceiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .p2align 8 .type _Z26a_x_mais_y_device_coalesceiPfS_i,@function _Z26a_x_mais_y_device_coalesceiPfS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_cmp_gt_i32_e32 vcc_lo, s3, v1 s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB1_2 s_load_b128 s[4:7], s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b32_e32 v0, 2, v0 s_lshl_b32 s2, s2, 2 s_load_b32 s0, s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v3, vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s6, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo global_load_b32 v3, v[3:4], off global_load_b32 v4, v[1:2], off v_add_nc_u32_e32 v5, 0, v0 v_add3_u32 v0, 0, s2, v0 s_waitcnt vmcnt(1) ds_store_b32 v5, v3 s_waitcnt vmcnt(0) ds_store_b32 v0, v4 ds_load_b32 v0, v5 v_cvt_f32_i32_e32 v3, s0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_fmac_f32_e32 v4, v0, v3 global_store_b32 v[1:2], v4, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits 
.p2align 6, 0x0 .amdhsa_kernel _Z26a_x_mais_y_device_coalesceiPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z26a_x_mais_y_device_coalesceiPfS_i, .Lfunc_end1-_Z26a_x_mais_y_device_coalesceiPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global 
.offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17a_x_mais_y_deviceiPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17a_x_mais_y_deviceiPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: 
hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims - .offset: 152 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26a_x_mais_y_device_coalesceiPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26a_x_mais_y_device_coalesceiPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0016d499_00000000-6_AXMaisY.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%f " .LC1: .string "%f" .text .globl _Z10fprint_matP8_IO_FILEPfi .type _Z10fprint_matP8_IO_FILEPfi, @function _Z10fprint_matP8_IO_FILEPfi: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbp movq %rsi, %r15 movl %edx, %r14d cmpl $1, %edx jle .L4 movq %rsi, %rbx leal -2(%rdx), %eax leaq 4(%rsi,%rax,4), %r13 leaq .LC0(%rip), %r12 .L5: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rdx movl $2, %esi movq %rbp, %rdi movl $1, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %r13, %rbx jne .L5 .L4: movslq %r14d, %r14 pxor %xmm0, %xmm0 cvtss2sd -4(%r15,%r14,4), %xmm0 leaq .LC1(%rip), %rdx movl $2, %esi movq %rbp, %rdi movl $1, %eax call __fprintf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z10fprint_matP8_IO_FILEPfi, .-_Z10fprint_matP8_IO_FILEPfi .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string 
"/home/ubuntu/Datasets/stackv2/train-structured/wakim/prog-gpu-cuda/master/AXMaisY/src/AXMaisY.cu" .align 8 .LC3: .string "Error %s at line %d in file %s\n" .text .globl _Z15alloc_on_devicePfS_PPvS1_i .type _Z15alloc_on_devicePfS_PPvS1_i, @function _Z15alloc_on_devicePfS_PPvS1_i: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %rbp movq %rcx, %r12 movslq %r8d, %rbx salq $2, %rbx movq %rbx, %rsi movq %rdx, %rdi call cudaMalloc@PLT testl %eax, %eax jne .L14 movq %rbx, %rsi movq %r12, %rdi call cudaMalloc@PLT testl %eax, %eax jne .L15 movq 0(%rbp), %rdi movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L16 movq (%r12), %rdi movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L17 popq %rbx .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $149, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L15: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $150, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L16: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC2(%rip), %r9 movl $151, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L17: movl %eax, %edi call cudaGetErrorString@PLT 
movq %rax, %rcx leaq .LC2(%rip), %r9 movl $152, %r8d leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2059: .size _Z15alloc_on_devicePfS_PPvS1_i, .-_Z15alloc_on_devicePfS_PPvS1_i .globl _Z16copy_from_devicePvPfi .type _Z16copy_from_devicePvPfi, @function _Z16copy_from_devicePvPfi: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq %rdi, %rax movq %rsi, %rdi movslq %edx, %rdx salq $2, %rdx movl $2, %ecx movq %rax, %rsi call cudaMemcpy@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z16copy_from_devicePvPfi, .-_Z16copy_from_devicePvPfi .globl _Z14free_on_devicePvS_ .type _Z14free_on_devicePvS_, @function _Z14free_on_devicePvS_: .LFB2061: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movq %rsi, %rbx call cudaFree@PLT movq %rbx, %rdi call cudaFree@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z14free_on_devicePvS_, .-_Z14free_on_devicePvS_ .globl _Z15a_x_mais_y_hostiPfS_i .type _Z15a_x_mais_y_hostiPfS_i, @function _Z15a_x_mais_y_hostiPfS_i: .LFB2062: .cfi_startproc endbr64 testl %ecx, %ecx jle .L22 movslq %ecx, %rcx salq $2, %rcx movl $0, %eax pxor %xmm1, %xmm1 cvtsi2ssl %edi, %xmm1 .L24: movaps %xmm1, %xmm0 mulss (%rsi,%rax), %xmm0 addss (%rdx,%rax), %xmm0 movss %xmm0, (%rdx,%rax) addq $4, %rax cmpq %rcx, %rax jne .L24 .L22: ret .cfi_endproc .LFE2062: .size _Z15a_x_mais_y_hostiPfS_i, .-_Z15a_x_mais_y_hostiPfS_i .globl _Z4initi .type _Z4initi, @function _Z4initi: .LFB2063: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl %edi, %ebp movslq %edi, %rbx leaq 0(,%rbx,4), %rdi call malloc@PLT testl %ebp, %ebp jle .L26 movl $0, %edx .L28: pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 movss %xmm0, (%rax,%rdx,4) addq $1, %rdx cmpq %rdx, %rbx jne .L28 .L26: addq $8, %rsp 
.cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _Z4initi, .-_Z4initi .globl _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i .type _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i, @function _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i: .LFB2088: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 24(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L35 .L31: movq 136(%rsp), %rax subq %fs:40, %rax jne .L36 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17a_x_mais_y_deviceiPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L31 .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i, .-_Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i .globl _Z17a_x_mais_y_deviceiPfS_i .type _Z17a_x_mais_y_deviceiPfS_i, @function _Z17a_x_mais_y_deviceiPfS_i: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z17a_x_mais_y_deviceiPfS_i, .-_Z17a_x_mais_y_deviceiPfS_i .globl _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i .type 
_Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i, @function _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i: .LFB2090: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 24(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L43 .L39: movq 136(%rsp), %rax subq %fs:40, %rax jne .L44 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L43: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26a_x_mais_y_device_coalesceiPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L39 .L44: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i, .-_Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .type _Z26a_x_mais_y_device_coalesceiPfS_i, @function _Z26a_x_mais_y_device_coalesceiPfS_i: .LFB2091: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _Z26a_x_mais_y_device_coalesceiPfS_i, .-_Z26a_x_mais_y_device_coalesceiPfS_i .section .rodata.str1.8 .align 8 .LC4: .string "Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\nalgoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc 
(Cuda coalescente)\n" .section .rodata.str1.1 .LC5: .string "debug" .LC6: .string "w" .LC7: .string "y_s.txt" .LC8: .string "x_s.txt" .LC9: .string "axy_s.txt" .LC10: .string "y_c.txt" .LC11: .string "x_c.txt" .LC12: .string "axy_c.txt" .LC13: .string "cc" .LC14: .string "x_cc.txt" .LC15: .string "y_cc.txt" .LC16: .string "axy_cc.txt" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %rsi, %rbx movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $3, %edi jle .L71 movl %edi, %ebp movq 8(%rsi), %r14 movq 16(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, 24(%rsp) movl %eax, %r12d movq 24(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, 16(%rsp) cmpl $4, %ebp jle .L50 movq 32(%rbx), %rdi leaq .LC5(%rip), %rsi call strcmp@PLT testl %eax, %eax sete %r15b movzbl %r15b, %r15d movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl %r12d, %edi call _Z4initi movq %rax, %r13 movl %r12d, %edi call _Z4initi movq %rax, %rbp movq 32(%rbx), %rdi movl $0, %esi call strtod@PLT pxor %xmm1, %xmm1 cvtsd2ss %xmm0, %xmm1 movss %xmm1, 12(%rsp) jmp .L65 .L71: movq (%rsi), %rdx leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $-1, %eax jmp .L47 .L77: testl %r15d, %r15d jne .L72 .L52: cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq %rbp, %rdx movq %r13, %rsi call _Z15a_x_mais_y_hostiPfS_i jmp .L58 .L72: leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC7(%rip), %r14 movq %r14, %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi movq %r14, %rdi 
call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC9(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl %r15d, %ebx .L64: pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 leaq .LC1(%rip), %rdx movl $2, %esi movq 32(%rsp), %rdi movl $1, %eax call __fprintf_chk@PLT movl %r12d, %edx movq %r13, %rsi movq 40(%rsp), %rdi call _Z10fprint_matP8_IO_FILEPfi movl %r12d, %edx movq %rbp, %rsi movq 48(%rsp), %rdi call _Z10fprint_matP8_IO_FILEPfi cmpl $1, %ebx je .L52 .L57: movq 16(%rsp), %rcx movq 24(%rsp), %rsi leal -1(%rcx,%rsi), %eax cltd idivl %ecx movl %eax, %r14d leaq 72(%rsp), %rcx leaq 64(%rsp), %rdx movl %esi, %r8d movq %rbp, %rsi movq %r13, %rdi call _Z15alloc_on_devicePfS_PPvS1_i cmpl $2, %ebx je .L73 cmpl $4, %ebx je .L74 .L61: movl %r12d, %edx movq %rbp, %rsi movq 72(%rsp), %rdi call _Z16copy_from_devicePvPfi .L58: testl %r15d, %r15d jne .L75 .L63: movq %r13, %rdi call free@PLT movq %rbp, %rdi call free@PLT movl $0, %eax .L47: movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L76 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L78: .cfi_restore_state movl $2, %ebx testl %r15d, %r15d je .L57 leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC10(%rip), %r14 movq %r14, %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC11(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi movq %r14, %rdi call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC12(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl $2, %ebx jmp .L64 .L69: movl $-1, %ebx jmp .L64 .L73: movl 16(%rsp), %eax movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %r14d, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl $0, %r9d movl $0, %r8d movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L61 
cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq 72(%rsp), %rdx movq 64(%rsp), %rsi call _Z41__device_stub__Z17a_x_mais_y_deviceiPfS_iiPfS_i jmp .L61 .L74: movq 16(%rsp), %rax movl %eax, 92(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl %r14d, 80(%rsp) movl $1, 84(%rsp) movl $1, 88(%rsp) movl %eax, %r8d sall $3, %r8d movl $0, %r9d movslq %r8d, %r8 movq 92(%rsp), %rdx movl $1, %ecx movq 80(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L61 cvttss2sil 12(%rsp), %edi movl %r12d, %ecx movq 72(%rsp), %rdx movq 64(%rsp), %rsi call _Z50__device_stub__Z26a_x_mais_y_device_coalesceiPfS_iiPfS_i jmp .L61 .L75: movl %r12d, %edx movq %rbp, %rsi movq 56(%rsp), %rbx movq %rbx, %rdi call _Z10fprint_matP8_IO_FILEPfi movq 40(%rsp), %rdi call fclose@PLT movq 48(%rsp), %rdi call fclose@PLT movq %rbx, %rdi call fclose@PLT movq 32(%rsp), %rdi call fclose@PLT jmp .L63 .L56: testl %r15d, %r15d jne .L69 leaq 72(%rsp), %rcx leaq 64(%rsp), %rdx movl 24(%rsp), %r8d movq %rbp, %rsi movq %r13, %rdi call _Z15alloc_on_devicePfS_PPvS1_i jmp .L61 .L50: movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl %r12d, %edi call _Z4initi movq %rax, %r13 movl %r12d, %edi call _Z4initi movq %rax, %rbp call rand@PLT cltd idivl %r12d pxor %xmm2, %xmm2 cvtsi2ssl %edx, %xmm2 movss %xmm2, 12(%rsp) movl $0, %r15d .L65: movzbl (%r14), %ebx cmpb $115, %bl je .L77 movq %r14, %rdi call strlen@PLT cmpb $99, %bl jne .L54 cmpq $1, %rax je .L78 .L54: movl $2, %edx leaq .LC13(%rip), %rsi movq %r14, %rdi call strncmp@PLT testl %eax, %eax jne .L56 movl $4, %ebx testl %r15d, %r15d je .L57 leaq .LC6(%rip), %rbx movq %rbx, %rsi leaq .LC10(%rip), %rdi call fopen@PLT movq %rax, 32(%rsp) movq %rbx, %rsi leaq .LC14(%rip), %rdi call fopen@PLT movq %rax, 40(%rsp) movq %rbx, %rsi leaq .LC15(%rip), %rdi call fopen@PLT movq %rax, 48(%rsp) movq %rbx, %rsi leaq .LC16(%rip), %rdi call fopen@PLT movq %rax, 56(%rsp) movl $4, %ebx jmp .L64 .L76: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: 
.size main, .-main .section .rodata.str1.8 .align 8 .LC17: .string "_Z26a_x_mais_y_device_coalesceiPfS_i" .section .rodata.str1.1 .LC18: .string "_Z17a_x_mais_y_deviceiPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2093: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z26a_x_mais_y_device_coalesceiPfS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z17a_x_mais_y_deviceiPfS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2093: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "AXMaisY.hip" .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $232, %rsp .cfi_def_cfa_offset 288 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %r13 cmpl $3, %edi jg .LBB0_2 # %bb.1: movq (%r13), %rsi movl $.L.str, %edi xorl %eax, %eax callq printf movl $-1, %eax jmp .LBB0_56 .LBB0_2: movl %edi, %ebp movq 8(%r13), %r12 movq 16(%r13), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 24(%r13), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 32(%rsp) # 8-byte Spill movb $1, 7(%rsp) # 1-byte Folded Spill cmpl $4, %ebp je .LBB0_4 # %bb.3: movq 32(%r13), %rdi movl $.L.str.2, %esi callq strcmp testl %eax, %eax setne 7(%rsp) # 1-byte Folded Spill .LBB0_4: xorl %edi, %edi callq time movl %eax, %edi callq srand movslq %r14d, %r15 shlq $2, %r15 movq %r15, %rdi callq malloc movq %rax, %rbx testl %r14d, %r14d jle .LBB0_7 # %bb.5: # %.lr.ph.preheader.i movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_6: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%rbx,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_6 .LBB0_7: # %_Z4initi.exit movq %r15, 224(%rsp) # 8-byte Spill movq %r15, %rdi callq malloc movq %rax, %r15 testl %r14d, %r14d jle .LBB0_10 # %bb.8: # %.lr.ph.preheader.i86 movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_9: # %.lr.ph.i88 # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %ecx, %xmm0 movss %xmm0, (%r15,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_9 .LBB0_10: # %_Z4initi.exit92 movq 32(%rsp), %rcx # 8-byte Reload leal (%r14,%rcx), %eax decl %eax cltd idivl %ecx 
movl %eax, 28(%rsp) # 4-byte Spill cmpl $4, %ebp jne .LBB0_12 # %bb.11: callq rand cltd idivl %r14d xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 jmp .LBB0_13 .LBB0_12: movq 32(%r13), %rdi xorl %esi, %esi callq strtod cvtsd2ss %xmm0, %xmm0 .LBB0_13: movaps %xmm0, 112(%rsp) # 16-byte Spill movzbl (%r12), %ebp cmpb $115, %bpl movb %bpl, 15(%rsp) # 1-byte Spill jne .LBB0_16 # %bb.14: movzbl 7(%rsp), %r12d # 1-byte Folded Reload testb %r12b, %r12b je .LBB0_23 # %bb.15: movl $0, 8(%rsp) # 4-byte Folded Spill # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_30 .LBB0_16: movq %r12, %rdi callq strlen cmpb $99, %bpl jne .LBB0_21 # %bb.17: cmpq $1, %rax jne .LBB0_21 # %bb.18: movl $0, 16(%rsp) # 4-byte Folded Spill movzbl 7(%rsp), %r12d # 1-byte Folded Reload movb $1, %al testb %r12b, %r12b je .LBB0_24 # %bb.19: movl %eax, 8(%rsp) # 4-byte Spill jmp .LBB0_20 .LBB0_21: movl $.L.str.12, %esi movl $2, %edx movq %r12, %rdi callq strncmp testl %eax, %eax je .LBB0_25 # %bb.22: movl $0, 8(%rsp) # 4-byte Folded Spill # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_29 .LBB0_23: movl $0, 8(%rsp) # 4-byte Folded Spill movl $.L.str.7, %ebp movl $.L.str.6, %r12d movl $.L.str.4, %edi movl $.L.str.4, %r13d movl $0, 16(%rsp) # 4-byte Folded Spill jmp .LBB0_28 .LBB0_24: movl %eax, 8(%rsp) # 4-byte Spill movl $.L.str.11, %ebp movl $.L.str.10, %r12d movl $.L.str.9, %edi movl $.L.str.9, %r13d jmp .LBB0_28 .LBB0_25: movb $1, %al movl %eax, 16(%rsp) # 4-byte Spill movzbl 7(%rsp), %r12d # 1-byte Folded Reload testb %r12b, %r12b je .LBB0_27 # %bb.26: movl $0, 8(%rsp) # 4-byte Folded Spill .LBB0_20: # implicit-def: $rbp # implicit-def: $r13 # implicit-def: $rax # kill: killed $rax # implicit-def: $rax # kill: killed $rax jmp .LBB0_30 .LBB0_27: movl $0, 
8(%rsp) # 4-byte Folded Spill movl $.L.str.15, %ebp movl $.L.str.14, %r13d movl $.L.str.13, %r12d movl $.L.str.9, %edi .LBB0_28: # %.sink.split movl $.L.str.5, %esi callq fopen movq %rax, 144(%rsp) # 8-byte Spill movl $.L.str.5, %esi movq %r12, %rdi callq fopen movq %rax, 136(%rsp) # 8-byte Spill movl $.L.str.5, %esi movq %r13, %rdi callq fopen movq %rax, %r13 movl $.L.str.5, %esi movq %rbp, %rdi callq fopen movq %rax, %rbp .LBB0_29: movzbl 7(%rsp), %r12d # 1-byte Folded Reload .LBB0_30: testb %r12b, %r12b movq %r13, 160(%rsp) # 8-byte Spill jne .LBB0_38 # %bb.31: movq %rbp, 216(%rsp) # 8-byte Spill movaps 112(%rsp), %xmm0 # 16-byte Reload cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq 144(%rsp), %rdi # 8-byte Reload movb $1, %al callq fprintf leal -1(%r14), %r13d movq %r14, 152(%rsp) # 8-byte Spill cmpl $2, %r14d movq 136(%rsp), %rbp # 8-byte Reload jl .LBB0_34 # %bb.32: # %.lr.ph.preheader.i93 movl %r13d, %r14d xorl %r12d, %r12d .p2align 4, 0x90 .LBB0_33: # %.lr.ph.i95 # =>This Inner Loop Header: Depth=1 movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r12 cmpq %r12, %r14 jne .LBB0_33 .LBB0_34: # %_Z10fprint_matP8_IO_FILEPfi.exit movslq %r13d, %r14 movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbp, %rdi movb $1, %al callq fprintf cmpl $2, 152(%rsp) # 4-byte Folded Reload movq 160(%rsp), %rbp # 8-byte Reload jl .LBB0_37 # %bb.35: # %.lr.ph.preheader.i99 movl %r13d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_36: # %.lr.ph.i101 # =>This Inner Loop Header: Depth=1 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r13 cmpq %r13, %r12 jne .LBB0_36 .LBB0_37: # %_Z10fprint_matP8_IO_FILEPfi.exit105 movss (%r15,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq 
%rbp, %rdi movb $1, %al callq fprintf movq 152(%rsp), %r14 # 8-byte Reload movzbl 7(%rsp), %r12d # 1-byte Folded Reload movq 216(%rsp), %rbp # 8-byte Reload .LBB0_38: cmpb $115, 15(%rsp) # 1-byte Folded Reload jne .LBB0_43 # %bb.39: testl %r14d, %r14d jle .LBB0_42 # %bb.40: # %.lr.ph.i106 cvttps2dq 112(%rsp), %xmm0 # 16-byte Folded Reload cvtdq2ps %xmm0, %xmm0 movl %r14d, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_41: # =>This Inner Loop Header: Depth=1 movss (%rbx,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm0, %xmm1 addss (%r15,%rcx,4), %xmm1 movss %xmm1, (%r15,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_41 .LBB0_42: # %_Z15a_x_mais_y_hostiPfS_i.exit testb %r12b, %r12b je .LBB0_51 jmp .LBB0_55 .LBB0_43: leaq 168(%rsp), %rdx leaq 40(%rsp), %rcx movq %rbx, %rdi movq %r15, %rsi movl %r14d, %r8d callq _Z15alloc_on_devicePfS_PPvS1_i cmpb $0, 8(%rsp) # 1-byte Folded Reload je .LBB0_46 # %bb.44: movl 28(%rsp), %edi # 4-byte Reload movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %rdi movl 32(%rsp), %edx # 4-byte Reload orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_50 # %bb.45: cvttss2si 112(%rsp), %eax # 16-byte Folded Reload movq 168(%rsp), %rcx movq 40(%rsp), %rdx movl %eax, 24(%rsp) movq %rcx, 104(%rsp) movq %rdx, 96(%rsp) movl %r14d, 20(%rsp) leaq 24(%rsp), %rax movq %rax, 176(%rsp) leaq 104(%rsp), %rax movq %rax, 184(%rsp) leaq 96(%rsp), %rax movq %rax, 192(%rsp) leaq 20(%rsp), %rax movq %rax, 200(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z17a_x_mais_y_deviceiPfS_i, %edi jmp .LBB0_49 .LBB0_46: cmpb $0, 16(%rsp) # 1-byte Folded Reload je .LBB0_50 # %bb.47: movq 32(%rsp), %rcx # 8-byte Reload leal (,%rcx,8), %eax movslq %eax, %r8 movl 28(%rsp), %edi # 4-byte Reload movabsq 
$4294967296, %rax # imm = 0x100000000 orq %rax, %rdi movl %ecx, %edx orq %rax, %rdx movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_50 # %bb.48: cvttss2si 112(%rsp), %eax # 16-byte Folded Reload movq 168(%rsp), %rcx movq 40(%rsp), %rdx movl %eax, 24(%rsp) movq %rcx, 104(%rsp) movq %rdx, 96(%rsp) movl %r14d, 20(%rsp) leaq 24(%rsp), %rax movq %rax, 176(%rsp) leaq 104(%rsp), %rax movq %rax, 184(%rsp) leaq 96(%rsp), %rax movq %rax, 192(%rsp) leaq 20(%rsp), %rax movq %rax, 200(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %edi .LBB0_49: pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB0_50: movq 40(%rsp), %rsi movq %r15, %rdi movq 224(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy testb %r12b, %r12b jne .LBB0_55 .LBB0_51: movq %r14, %rax decl %r14d cmpl $2, %eax jl .LBB0_54 # %bb.52: # %.lr.ph.preheader.i117 movl %r14d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB0_53: # %.lr.ph.i119 # =>This Inner Loop Header: Depth=1 movss (%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbp, %rdi movb $1, %al callq fprintf incq %r13 cmpq %r13, %r12 jne .LBB0_53 .LBB0_54: # %_Z10fprint_matP8_IO_FILEPfi.exit123 movslq %r14d, %rax movss (%r15,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbp, %rdi movb $1, %al callq fprintf movq 136(%rsp), %rdi # 8-byte Reload callq fclose movq 160(%rsp), %rdi # 8-byte Reload callq fclose movq %rbp, %rdi callq fclose movq 144(%rsp), %rdi # 8-byte Reload callq fclose .LBB0_55: movq %rbx, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax .LBB0_56: addq $232, %rsp 
.cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .globl _Z4initi # -- Begin function _Z4initi .p2align 4, 0x90 .type _Z4initi,@function _Z4initi: # @_Z4initi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl %edi, %ebx movslq %edi, %r14 leaq (,%r14,4), %rdi callq malloc testl %r14d, %r14d jle .LBB1_3 # %bb.1: # %.lr.ph.preheader movl %ebx, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 movss %xmm0, (%rax,%rdx,4) incq %rdx cmpq %rdx, %rcx jne .LBB1_2 .LBB1_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z4initi, .Lfunc_end1-_Z4initi .cfi_endproc # -- End function .globl _Z10fprint_matP8_IO_FILEPfi # -- Begin function _Z10fprint_matP8_IO_FILEPfi .p2align 4, 0x90 .type _Z10fprint_matP8_IO_FILEPfi,@function _Z10fprint_matP8_IO_FILEPfi: # @_Z10fprint_matP8_IO_FILEPfi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 # kill: def $edx killed $edx def $rdx movq %rsi, %r14 movq %rdi, %rbx leal -1(%rdx), %ebp cmpl $2, %edx jl .LBB2_3 # %bb.1: # %.lr.ph.preheader movl %ebp, %r15d xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movss (%r14,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.17, %esi movq %rbx, 
%rdi movb $1, %al callq fprintf incq %r12 cmpq %r12, %r15 jne .LBB2_2 .LBB2_3: # %._crit_edge movslq %ebp, %rax movss (%r14,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.16, %esi movq %rbx, %rdi movb $1, %al popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fprintf # TAILCALL .Lfunc_end2: .size _Z10fprint_matP8_IO_FILEPfi, .Lfunc_end2-_Z10fprint_matP8_IO_FILEPfi .cfi_endproc # -- End function .globl _Z15a_x_mais_y_hostiPfS_i # -- Begin function _Z15a_x_mais_y_hostiPfS_i .p2align 4, 0x90 .type _Z15a_x_mais_y_hostiPfS_i,@function _Z15a_x_mais_y_hostiPfS_i: # @_Z15a_x_mais_y_hostiPfS_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB3_3 # %bb.1: # %.lr.ph cvtsi2ss %edi, %xmm0 movl %ecx, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_2: # =>This Inner Loop Header: Depth=1 movss (%rsi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm0, %xmm1 addss (%rdx,%rcx,4), %xmm1 movss %xmm1, (%rdx,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB3_2 .LBB3_3: # %._crit_edge retq .Lfunc_end3: .size _Z15a_x_mais_y_hostiPfS_i, .Lfunc_end3-_Z15a_x_mais_y_hostiPfS_i .cfi_endproc # -- End function .globl _Z15alloc_on_devicePfS_PPvS1_i # -- Begin function _Z15alloc_on_devicePfS_PPvS1_i .p2align 4, 0x90 .type _Z15alloc_on_devicePfS_PPvS1_i,@function _Z15alloc_on_devicePfS_PPvS1_i: # @_Z15alloc_on_devicePfS_PPvS1_i .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r15 movq %rdx, %r13 movq %rsi, %rbx movq %rdi, %r12 movslq %r8d, %r14 shlq $2, %r14 movq %rdx, %rdi movq %r14, %rsi callq hipMalloc testl %eax, %eax jne .LBB4_1 # %bb.3: movq %r15, %rdi movq %r14, %rsi callq hipMalloc 
testl %eax, %eax jne .LBB4_4 # %bb.5: movq (%r13), %rdi movq %r12, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB4_6 # %bb.7: movq (%r15), %rdi movq %rbx, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB4_8 # %bb.9: popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB4_1: .cfi_def_cfa_offset 48 movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $151, %ecx jmp .LBB4_2 .LBB4_4: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $152, %ecx jmp .LBB4_2 .LBB4_6: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $153, %ecx jmp .LBB4_2 .LBB4_8: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str.19, %r8d movq %rbx, %rdi movq %rax, %rdx movl $154, %ecx .LBB4_2: xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end4: .size _Z15alloc_on_devicePfS_PPvS1_i, .Lfunc_end4-_Z15alloc_on_devicePfS_PPvS1_i .cfi_endproc # -- End function .globl _Z32__device_stub__a_x_mais_y_deviceiPfS_i # -- Begin function _Z32__device_stub__a_x_mais_y_deviceiPfS_i .p2align 4, 0x90 .type _Z32__device_stub__a_x_mais_y_deviceiPfS_i,@function _Z32__device_stub__a_x_mais_y_deviceiPfS_i: # @_Z32__device_stub__a_x_mais_y_deviceiPfS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 
16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17a_x_mais_y_deviceiPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _Z32__device_stub__a_x_mais_y_deviceiPfS_i, .Lfunc_end5-_Z32__device_stub__a_x_mais_y_deviceiPfS_i .cfi_endproc # -- End function .globl _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i # -- Begin function _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .p2align 4, 0x90 .type _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i,@function _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i: # @_Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end6: .size _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i, .Lfunc_end6-_Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .cfi_endproc # -- End function .globl _Z16copy_from_devicePvPfi # -- Begin function _Z16copy_from_devicePvPfi .p2align 4, 0x90 .type _Z16copy_from_devicePvPfi,@function _Z16copy_from_devicePvPfi: # @_Z16copy_from_devicePvPfi .cfi_startproc # %bb.0: movq %rdi, %rax movslq %edx, %rdx shlq $2, %rdx movq %rsi, %rdi movq %rax, %rsi movl $2, 
%ecx jmp hipMemcpy # TAILCALL .Lfunc_end7: .size _Z16copy_from_devicePvPfi, .Lfunc_end7-_Z16copy_from_devicePvPfi .cfi_endproc # -- End function .globl _Z14free_on_devicePvS_ # -- Begin function _Z14free_on_devicePvS_ .p2align 4, 0x90 .type _Z14free_on_devicePvS_,@function _Z14free_on_devicePvS_: # @_Z14free_on_devicePvS_ .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movq %rsi, %rbx callq hipFree movq %rbx, %rdi popq %rbx .cfi_def_cfa_offset 8 jmp hipFree # TAILCALL .Lfunc_end8: .size _Z14free_on_devicePvS_, .Lfunc_end8-_Z14free_on_devicePvS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB9_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB9_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17a_x_mais_y_deviceiPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26a_x_mais_y_device_coalesceiPfS_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end9: .size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB10_2 # %bb.1: pushq %rax 
.cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB10_2: retq .Lfunc_end10: .size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\nalgoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n" .size .L.str, 159 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "debug" .size .L.str.2, 6 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "y_s.txt" .size .L.str.4, 8 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "w" .size .L.str.5, 2 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "x_s.txt" .size .L.str.6, 8 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "axy_s.txt" .size .L.str.7, 10 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "y_c.txt" .size .L.str.9, 8 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "x_c.txt" .size .L.str.10, 8 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "axy_c.txt" .size .L.str.11, 10 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "cc" .size .L.str.12, 3 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz "x_cc.txt" .size .L.str.13, 9 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "y_cc.txt" .size .L.str.14, 9 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "axy_cc.txt" .size .L.str.15, 11 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "%f" .size .L.str.16, 3 .type _Z17a_x_mais_y_deviceiPfS_i,@object # @_Z17a_x_mais_y_deviceiPfS_i .section .rodata,"a",@progbits .globl _Z17a_x_mais_y_deviceiPfS_i .p2align 3, 0x0 _Z17a_x_mais_y_deviceiPfS_i: .quad _Z32__device_stub__a_x_mais_y_deviceiPfS_i .size _Z17a_x_mais_y_deviceiPfS_i, 8 .type _Z26a_x_mais_y_device_coalesceiPfS_i,@object # @_Z26a_x_mais_y_device_coalesceiPfS_i .globl _Z26a_x_mais_y_device_coalesceiPfS_i .p2align 3, 0x0 
_Z26a_x_mais_y_device_coalesceiPfS_i: .quad _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .size _Z26a_x_mais_y_device_coalesceiPfS_i, 8 .type .L.str.17,@object # @.str.17 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.17: .asciz "%f " .size .L.str.17, 4 .type .L.str.18,@object # @.str.18 .L.str.18: .asciz "Error %s at line %d in file %s\n" .size .L.str.18, 32 .type .L.str.19,@object # @.str.19 .L.str.19: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wakim/prog-gpu-cuda/master/AXMaisY/src/AXMaisY.hip" .size .L.str.19, 108 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z17a_x_mais_y_deviceiPfS_i" .size .L__unnamed_1, 28 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z26a_x_mais_y_device_coalesceiPfS_i" .size .L__unnamed_2, 37 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__a_x_mais_y_deviceiPfS_i .addrsig_sym _Z41__device_stub__a_x_mais_y_device_coalesceiPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17a_x_mais_y_deviceiPfS_i .addrsig_sym _Z26a_x_mais_y_device_coalesceiPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
.file "tmpxft_00071fce_00000000-6_jitlink.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl bar .type bar, @function bar: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size bar, .-bar .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <hip/hip_runtime.h> #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <hip/hip_runtime.h> #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* Numba requires return value to be passed as a pointer in the first argument. To compile: nvcc -arch=sm_20 -dc jitlink.cu -o jitlink.o */ #include <hip/hip_runtime.h> #include <cstdio> extern "C" { __device__ int bar(int* retval, int a, int b){ /* Fill this function with anything */ /* Return 0 to indicate success */ return 0; } }
.text .file "jitlink.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00071fce_00000000-6_jitlink.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl bar .type bar, @function bar: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size bar, .-bar .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "jitlink.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h> #include <cuda_runtime.h> __global__ void addKernel01(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel02(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel03(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel04(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel05(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[i] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel06(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel07(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[48 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel08(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[32 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n 
< repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel09(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[40 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel10(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[16 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; } __global__ void addKernel11(int *c, int *a, int *b, int repeat) { __shared__ unsigned char s[8 * 1024]; int i = threadIdx.x; int j = i; for (int n = 0; n < repeat; n++) s[i % 64] = 1; for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64]; }
.file "tmpxft_00173125_00000000-6_VectorAdd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel01PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i .globl _Z11addKernel01PiS_S_i .type _Z11addKernel01PiS_S_i, @function _Z11addKernel01PiS_S_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z36__device_stub__Z11addKernel01PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z11addKernel01PiS_S_i, .-_Z11addKernel01PiS_S_i .globl _Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i: .LFB2053: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel02PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i .globl _Z11addKernel02PiS_S_i .type _Z11addKernel02PiS_S_i, @function _Z11addKernel02PiS_S_i: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel02PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _Z11addKernel02PiS_S_i, .-_Z11addKernel02PiS_S_i .globl _Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i, @function 
_Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i: .LFB2055: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 136(%rsp), %rax subq %fs:40, %rax jne .L24 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel03PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i .globl _Z11addKernel03PiS_S_i .type _Z11addKernel03PiS_S_i, @function _Z11addKernel03PiS_S_i: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel03PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size _Z11addKernel03PiS_S_i, .-_Z11addKernel03PiS_S_i .globl _Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i: .LFB2057: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 
96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 136(%rsp), %rax subq %fs:40, %rax jne .L32 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel04PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i .globl _Z11addKernel04PiS_S_i .type _Z11addKernel04PiS_S_i, @function _Z11addKernel04PiS_S_i: .LFB2058: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel04PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z11addKernel04PiS_S_i, .-_Z11addKernel04PiS_S_i .globl _Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i: .LFB2059: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 
60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L39 .L35: movq 136(%rsp), %rax subq %fs:40, %rax jne .L40 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L39: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel05PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L35 .L40: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size _Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i .globl _Z11addKernel05PiS_S_i .type _Z11addKernel05PiS_S_i, @function _Z11addKernel05PiS_S_i: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel05PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z11addKernel05PiS_S_i, .-_Z11addKernel05PiS_S_i .globl _Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i: .LFB2061: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L47 .L43: movq 136(%rsp), %rax subq %fs:40, %rax jne .L48 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 
40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel06PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L43 .L48: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i .globl _Z11addKernel06PiS_S_i .type _Z11addKernel06PiS_S_i, @function _Z11addKernel06PiS_S_i: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel06PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _Z11addKernel06PiS_S_i, .-_Z11addKernel06PiS_S_i .globl _Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i: .LFB2063: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movq 136(%rsp), %rax subq %fs:40, %rax jne .L56 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L55: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel07PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L51 .L56: call __stack_chk_fail@PLT 
.cfi_endproc .LFE2063: .size _Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i .globl _Z11addKernel07PiS_S_i .type _Z11addKernel07PiS_S_i, @function _Z11addKernel07PiS_S_i: .LFB2064: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel07PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2064: .size _Z11addKernel07PiS_S_i, .-_Z11addKernel07PiS_S_i .globl _Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i: .LFB2065: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L63 .L59: movq 136(%rsp), %rax subq %fs:40, %rax jne .L64 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L63: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel08PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L59 .L64: call __stack_chk_fail@PLT .cfi_endproc .LFE2065: .size _Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i .globl _Z11addKernel08PiS_S_i .type _Z11addKernel08PiS_S_i, @function _Z11addKernel08PiS_S_i: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel08PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _Z11addKernel08PiS_S_i, .-_Z11addKernel08PiS_S_i .globl _Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i: .LFB2067: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L71 .L67: movq 136(%rsp), %rax subq %fs:40, %rax jne .L72 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L71: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel09PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L67 .L72: call __stack_chk_fail@PLT .cfi_endproc .LFE2067: .size _Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i .globl _Z11addKernel09PiS_S_i .type _Z11addKernel09PiS_S_i, @function _Z11addKernel09PiS_S_i: .LFB2068: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel09PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2068: .size _Z11addKernel09PiS_S_i, .-_Z11addKernel09PiS_S_i .globl _Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i .type 
_Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i: .LFB2069: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L79 .L75: movq 136(%rsp), %rax subq %fs:40, %rax jne .L80 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L79: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel10PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L75 .L80: call __stack_chk_fail@PLT .cfi_endproc .LFE2069: .size _Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i .globl _Z11addKernel10PiS_S_i .type _Z11addKernel10PiS_S_i, @function _Z11addKernel10PiS_S_i: .LFB2070: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel10PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2070: .size _Z11addKernel10PiS_S_i, .-_Z11addKernel10PiS_S_i .globl _Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i .type _Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i, @function _Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i: .LFB2071: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq 
%rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L87 .L83: movq 136(%rsp), %rax subq %fs:40, %rax jne .L88 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L87: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11addKernel11PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L83 .L88: call __stack_chk_fail@PLT .cfi_endproc .LFE2071: .size _Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i, .-_Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i .globl _Z11addKernel11PiS_S_i .type _Z11addKernel11PiS_S_i, @function _Z11addKernel11PiS_S_i: .LFB2072: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z11addKernel11PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2072: .size _Z11addKernel11PiS_S_i, .-_Z11addKernel11PiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z11addKernel11PiS_S_i" .LC1: .string "_Z11addKernel10PiS_S_i" .LC2: .string "_Z11addKernel09PiS_S_i" .LC3: .string "_Z11addKernel08PiS_S_i" .LC4: .string "_Z11addKernel07PiS_S_i" .LC5: .string "_Z11addKernel06PiS_S_i" .LC6: .string "_Z11addKernel05PiS_S_i" .LC7: .string "_Z11addKernel04PiS_S_i" .LC8: .string "_Z11addKernel03PiS_S_i" .LC9: .string "_Z11addKernel02PiS_S_i" .LC10: .string "_Z11addKernel01PiS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2074: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 
.cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel11PiS_S_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel10PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel09PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel08PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel07PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel06PiS_S_i(%rip), %rsi movq %rbx, 
%rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel05PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel04PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel03PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel02PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z11addKernel01PiS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2074: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad 
_ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h>
#include <cuda_runtime.h>

/*
 * Shared-memory footprint microbenchmark kernels (CUDA).
 *
 * Every kernel performs the same work: `repeat` rounds of a trivial store
 * into static __shared__ memory, then `repeat` rounds of
 *     c[tid] = a[tid] + b[tid] + smem[tid % 64];
 * The kernels differ only in the size of the static __shared__ array
 * (48/40/32/16/8 KiB), which bounds how many blocks can co-reside on an
 * SM, so timing them side by side exposes the occupancy impact of the
 * shared-memory footprint.
 *
 * NOTE(review): the shared array is written and then read back with no
 * __syncthreads() in between, and multiple threads hit the same slot
 * (tid % 64). The cross-thread race is benign only because every writer
 * stores the identical byte (1); do not add a barrier here, as that would
 * change what the benchmark measures.
 *
 * Launch expectation: 1-D blocks; indexing uses threadIdx.x only, so each
 * block operates on the same c/a/b elements by design.
 */

__global__ void addKernel01(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;                              /* output index alias kept from original */
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel02(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel03(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel04(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

/* Same as addKernel01 but without the output-index alias: writes c[tid]. */
__global__ void addKernel05(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[tid] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel06(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel07(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[48 * 1024];   /* 48 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel08(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[32 * 1024];   /* 32 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel09(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[40 * 1024];   /* 40 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel10(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[16 * 1024];   /* 16 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}

__global__ void addKernel11(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char smem[8 * 1024];    /* 8 KiB static shared */
    int tid = threadIdx.x;
    int out = tid;
    for (int r = 0; r < repeat; r++) smem[tid % 64] = 1;
    for (int r = 0; r < repeat; r++) c[out] = a[tid] + b[tid] + smem[tid % 64];
}
#include <hip/hip_runtime.h>

/*
 * HIP port of the shared-memory footprint microbenchmarks.
 *
 * Each kernel runs `repeat` iterations of a byte store into LDS (shared
 * memory), then `repeat` iterations of
 *     c[lane] = a[lane] + b[lane] + lds[lane % 64];
 * Only the static __shared__ allocation size varies between kernels
 * (48 down to 8 KiB), which is what limits per-CU block residency.
 *
 * NOTE(review): shared memory is read back without a barrier after the
 * store loop; the race is benign only because all writers store 1.
 */

__global__ void addKernel01(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;                            // output index alias (as in original)
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel02(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel03(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel04(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

// Variant without the output-index alias: writes through c[lane] directly.
__global__ void addKernel05(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[lane] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel06(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel07(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[48 * 1024];    // 48 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel08(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[32 * 1024];    // 32 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel09(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[40 * 1024];    // 40 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel10(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[16 * 1024];    // 16 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}

__global__ void addKernel11(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char lds[8 * 1024];     // 8 KiB LDS
    int lane = threadIdx.x;
    int dst  = lane;
    for (int it = 0; it < repeat; ++it)
        lds[lane % 64] = 1;
    for (int it = 0; it < repeat; ++it)
        c[dst] = a[lane] + b[lane] + lds[lane % 64];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>

/*
 * Shared-memory occupancy probes, HIP version.
 *
 * The eleven kernels are functionally identical — `repeat` stores of the
 * byte 1 into shared memory, followed by `repeat` evaluations of
 * c[idx] = a[idx] + b[idx] + scratch[idx % 64] — and differ solely in how
 * much static __shared__ storage they reserve (48, 40, 32, 16 or 8 KiB).
 * The reservation size is the experimental variable: it caps how many
 * workgroups fit on a compute unit at once.
 *
 * NOTE(review): there is deliberately no barrier between the store loop
 * and the read loop; every thread writes the same value, so the data race
 * cannot change the observed result.
 */

__global__ void addKernel01(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;                       // alias retained from original code
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel02(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel03(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel04(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

// Only kernel that indexes c directly with the thread id (no alias local).
__global__ void addKernel05(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx = threadIdx.x;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[idx] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel06(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel07(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[48 * 1024];  // 48 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel08(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[32 * 1024];  // 32 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel09(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[40 * 1024];  // 40 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel10(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[16 * 1024];  // 16 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}

__global__ void addKernel11(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char scratch[8 * 1024];   // 8 KiB
    const int idx    = threadIdx.x;
    const int target = idx;
    for (int pass = 0; pass < repeat; pass++) scratch[idx % 64] = 1;
    for (int pass = 0; pass < repeat; pass++)
        c[target] = a[idx] + b[idx] + scratch[idx % 64];
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11addKernel01PiS_S_i .globl _Z11addKernel01PiS_S_i .p2align 8 .type _Z11addKernel01PiS_S_i,@function _Z11addKernel01PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB0_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB0_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB0_4 .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel01PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 
.amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11addKernel01PiS_S_i, .Lfunc_end0-_Z11addKernel01PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel02PiS_S_i .globl _Z11addKernel02PiS_S_i .p2align 8 .type _Z11addKernel02PiS_S_i,@function _Z11addKernel02PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB1_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB1_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB1_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB1_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB1_4 .LBB1_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel02PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 
.amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z11addKernel02PiS_S_i, .Lfunc_end1-_Z11addKernel02PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel03PiS_S_i .globl _Z11addKernel03PiS_S_i .p2align 8 .type _Z11addKernel03PiS_S_i,@function _Z11addKernel03PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB2_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB2_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB2_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB2_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 
s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB2_4 .LBB2_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel03PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z11addKernel03PiS_S_i, .Lfunc_end2-_Z11addKernel03PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel04PiS_S_i .globl _Z11addKernel04PiS_S_i .p2align 8 .type _Z11addKernel04PiS_S_i,@function _Z11addKernel04PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB3_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB3_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB3_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 
s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB3_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB3_4 .LBB3_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel04PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text 
.Lfunc_end3: .size _Z11addKernel04PiS_S_i, .Lfunc_end3-_Z11addKernel04PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel05PiS_S_i .globl _Z11addKernel05PiS_S_i .p2align 8 .type _Z11addKernel05PiS_S_i,@function _Z11addKernel05PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB4_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB4_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB4_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB4_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB4_4 .LBB4_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel05PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end4: .size _Z11addKernel05PiS_S_i, .Lfunc_end4-_Z11addKernel05PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel06PiS_S_i .globl _Z11addKernel06PiS_S_i .p2align 8 .type _Z11addKernel06PiS_S_i,@function _Z11addKernel06PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB5_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB5_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB5_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB5_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB5_4 .LBB5_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel06PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 
.amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end5: .size _Z11addKernel06PiS_S_i, .Lfunc_end5-_Z11addKernel06PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel07PiS_S_i .globl _Z11addKernel07PiS_S_i .p2align 8 .type _Z11addKernel07PiS_S_i,@function _Z11addKernel07PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB6_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB6_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB6_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB6_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off 
s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB6_4 .LBB6_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel07PiS_S_i .amdhsa_group_segment_fixed_size 49152 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end6: .size _Z11addKernel07PiS_S_i, .Lfunc_end6-_Z11addKernel07PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel08PiS_S_i .globl _Z11addKernel08PiS_S_i .p2align 8 .type _Z11addKernel08PiS_S_i,@function _Z11addKernel08PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB7_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB7_2: s_cmp_lt_i32 
s2, 1 s_cbranch_scc1 .LBB7_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB7_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB7_4 .LBB7_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel08PiS_S_i .amdhsa_group_segment_fixed_size 32768 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 
.amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end7: .size _Z11addKernel08PiS_S_i, .Lfunc_end7-_Z11addKernel08PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel09PiS_S_i .globl _Z11addKernel09PiS_S_i .p2align 8 .type _Z11addKernel09PiS_S_i,@function _Z11addKernel09PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB8_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB8_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB8_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB8_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB8_4 .LBB8_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel09PiS_S_i .amdhsa_group_segment_fixed_size 40960 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 
.amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end8: .size _Z11addKernel09PiS_S_i, .Lfunc_end8-_Z11addKernel09PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel10PiS_S_i .globl _Z11addKernel10PiS_S_i .p2align 8 .type _Z11addKernel10PiS_S_i,@function _Z11addKernel10PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB9_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB9_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB9_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, null, s5, 0, s0 .LBB9_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB9_4 .LBB9_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel10PiS_S_i .amdhsa_group_segment_fixed_size 16384 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 
.amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end9: .size _Z11addKernel10PiS_S_i, .Lfunc_end9-_Z11addKernel10PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z11addKernel11PiS_S_i .globl _Z11addKernel11PiS_S_i .p2align 8 .type _Z11addKernel11PiS_S_i,@function _Z11addKernel11PiS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB10_2 v_dual_mov_b32 v2, 1 :: v_dual_and_b32 v1, 63, v0 ds_store_b8 v1, v2 .LBB10_2: s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB10_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_and_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v4, 2, v0 ds_load_u8 v6, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, s3, s6, v4 v_add_co_u32 v2, s0, s0, v4 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v4, s0, s4, v4 v_add_co_ci_u32_e64 v1, null, s7, 0, s3 v_add_co_ci_u32_e64 v5, 
null, s5, 0, s0 .LBB10_4: global_load_b32 v7, v[0:1], off global_load_b32 v8, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add3_u32 v7, v8, v7, v6 global_store_b32 v[4:5], v7, off s_cbranch_scc0 .LBB10_4 .LBB10_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11addKernel11PiS_S_i .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end10: .size _Z11addKernel11PiS_S_i, .Lfunc_end10-_Z11addKernel11PiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git 
(https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel01PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel01PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel02PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel02PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: 
OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel03PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel03PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel04PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel04PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel05PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel05PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 
.value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel06PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel06PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 49152 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel07PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel07PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 32768 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel08PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel08PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 
.value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 40960 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel09PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel09PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 16384 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel10PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel10PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11addKernel11PiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z11addKernel11PiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 
amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// Family of HIP benchmark kernels that are identical except for the size of
// the statically allocated __shared__ array (48 KiB x7, then 32/40/16/8 KiB).
// Presumably used to measure how per-block shared-memory usage affects
// occupancy/throughput — TODO confirm against the host-side driver.
//
// NOTE(review): all kernels index by threadIdx.x only (blockIdx is never
// used), so every block reads/writes the same elements of a/b/c, and there is
// no bounds check against the array length — the launch presumably uses a
// single block sized to the arrays; verify at the call site.
// NOTE(review): there is no __syncthreads() between the shared-memory write
// loop and the read loop; multiple threads store into the same s[i % 64]
// slot, but all of them store the constant 1, so the value read back is 1
// whenever repeat > 0.
#include <hip/hip_runtime.h>

// c[j] = a[i] + b[i] + s[i % 64], with i = j = threadIdx.x.
// `repeat` re-runs both the shared store loop and the output loop,
// presumably to amplify the work for timing. 48 KiB static shared memory.
__global__ void addKernel01(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];  // 48 KiB per block, only first 64 bytes touched
    int i = threadIdx.x;  // per-thread element index
    int j = i;            // output index; identical to i
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;    // every thread stores 1 into one of 64 shared bytes
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];  // s[...] == 1 whenever repeat > 0
}

// Same body as addKernel01; 48 KiB static shared memory.
__global__ void addKernel02(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01; 48 KiB static shared memory.
__global__ void addKernel03(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01; 48 KiB static shared memory.
__global__ void addKernel04(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Variant of addKernel01 that writes c[i] directly (no `j` alias);
// 48 KiB static shared memory. Behavior is otherwise identical.
__global__ void addKernel05(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[i] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01; 48 KiB static shared memory.
__global__ void addKernel06(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01; 48 KiB static shared memory.
__global__ void addKernel07(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01, but with 32 KiB static shared memory.
__global__ void addKernel08(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[32 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01, but with 40 KiB static shared memory.
__global__ void addKernel09(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[40 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01, but with 16 KiB static shared memory.
__global__ void addKernel10(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[16 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}

// Same body as addKernel01, but with 8 KiB static shared memory.
__global__ void addKernel11(int *c, int *a, int *b, int repeat)
{
    __shared__ unsigned char s[8 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++)
        s[i % 64] = 1;
    for (int n = 0; n < repeat; n++)
        c[j] = a[i] + b[i] + s[i % 64];
}
.text .file "VectorAdd.hip" .globl _Z26__device_stub__addKernel01PiS_S_i # -- Begin function _Z26__device_stub__addKernel01PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel01PiS_S_i,@function _Z26__device_stub__addKernel01PiS_S_i: # @_Z26__device_stub__addKernel01PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel01PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z26__device_stub__addKernel01PiS_S_i, .Lfunc_end0-_Z26__device_stub__addKernel01PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel02PiS_S_i # -- Begin function _Z26__device_stub__addKernel02PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel02PiS_S_i,@function _Z26__device_stub__addKernel02PiS_S_i: # @_Z26__device_stub__addKernel02PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel02PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 
callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z26__device_stub__addKernel02PiS_S_i, .Lfunc_end1-_Z26__device_stub__addKernel02PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel03PiS_S_i # -- Begin function _Z26__device_stub__addKernel03PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel03PiS_S_i,@function _Z26__device_stub__addKernel03PiS_S_i: # @_Z26__device_stub__addKernel03PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel03PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z26__device_stub__addKernel03PiS_S_i, .Lfunc_end2-_Z26__device_stub__addKernel03PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel04PiS_S_i # -- Begin function _Z26__device_stub__addKernel04PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel04PiS_S_i,@function _Z26__device_stub__addKernel04PiS_S_i: # @_Z26__device_stub__addKernel04PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 
48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel04PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size _Z26__device_stub__addKernel04PiS_S_i, .Lfunc_end3-_Z26__device_stub__addKernel04PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel05PiS_S_i # -- Begin function _Z26__device_stub__addKernel05PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel05PiS_S_i,@function _Z26__device_stub__addKernel05PiS_S_i: # @_Z26__device_stub__addKernel05PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel05PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z26__device_stub__addKernel05PiS_S_i, .Lfunc_end4-_Z26__device_stub__addKernel05PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel06PiS_S_i # -- Begin function _Z26__device_stub__addKernel06PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel06PiS_S_i,@function _Z26__device_stub__addKernel06PiS_S_i: # @_Z26__device_stub__addKernel06PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 
96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel06PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _Z26__device_stub__addKernel06PiS_S_i, .Lfunc_end5-_Z26__device_stub__addKernel06PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel07PiS_S_i # -- Begin function _Z26__device_stub__addKernel07PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel07PiS_S_i,@function _Z26__device_stub__addKernel07PiS_S_i: # @_Z26__device_stub__addKernel07PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel07PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end6: .size _Z26__device_stub__addKernel07PiS_S_i, .Lfunc_end6-_Z26__device_stub__addKernel07PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel08PiS_S_i # -- Begin function _Z26__device_stub__addKernel08PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel08PiS_S_i,@function _Z26__device_stub__addKernel08PiS_S_i: # @_Z26__device_stub__addKernel08PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq 
%rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel08PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end7: .size _Z26__device_stub__addKernel08PiS_S_i, .Lfunc_end7-_Z26__device_stub__addKernel08PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel09PiS_S_i # -- Begin function _Z26__device_stub__addKernel09PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel09PiS_S_i,@function _Z26__device_stub__addKernel09PiS_S_i: # @_Z26__device_stub__addKernel09PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel09PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end8: .size _Z26__device_stub__addKernel09PiS_S_i, .Lfunc_end8-_Z26__device_stub__addKernel09PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel10PiS_S_i # -- Begin function _Z26__device_stub__addKernel10PiS_S_i .p2align 4, 0x90 .type 
_Z26__device_stub__addKernel10PiS_S_i,@function _Z26__device_stub__addKernel10PiS_S_i: # @_Z26__device_stub__addKernel10PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel10PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end9: .size _Z26__device_stub__addKernel10PiS_S_i, .Lfunc_end9-_Z26__device_stub__addKernel10PiS_S_i .cfi_endproc # -- End function .globl _Z26__device_stub__addKernel11PiS_S_i # -- Begin function _Z26__device_stub__addKernel11PiS_S_i .p2align 4, 0x90 .type _Z26__device_stub__addKernel11PiS_S_i,@function _Z26__device_stub__addKernel11PiS_S_i: # @_Z26__device_stub__addKernel11PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11addKernel11PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end10: .size _Z26__device_stub__addKernel11PiS_S_i, 
.Lfunc_end10-_Z26__device_stub__addKernel11PiS_S_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB11_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB11_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel01PiS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel02PiS_S_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel03PiS_S_i, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel04PiS_S_i, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel05PiS_S_i, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel06PiS_S_i, %esi movl $.L__unnamed_6, %edx movl $.L__unnamed_6, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel07PiS_S_i, %esi movl 
$.L__unnamed_7, %edx movl $.L__unnamed_7, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel08PiS_S_i, %esi movl $.L__unnamed_8, %edx movl $.L__unnamed_8, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel09PiS_S_i, %esi movl $.L__unnamed_9, %edx movl $.L__unnamed_9, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel10PiS_S_i, %esi movl $.L__unnamed_10, %edx movl $.L__unnamed_10, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11addKernel11PiS_S_i, %esi movl $.L__unnamed_11, %edx movl $.L__unnamed_11, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end11: .size __hip_module_ctor, .Lfunc_end11-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB12_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB12_2: retq .Lfunc_end12: .size __hip_module_dtor, .Lfunc_end12-__hip_module_dtor .cfi_endproc # -- End function .type _Z11addKernel01PiS_S_i,@object # @_Z11addKernel01PiS_S_i .section .rodata,"a",@progbits .globl _Z11addKernel01PiS_S_i .p2align 3, 0x0 _Z11addKernel01PiS_S_i: .quad _Z26__device_stub__addKernel01PiS_S_i .size _Z11addKernel01PiS_S_i, 8 .type 
_Z11addKernel02PiS_S_i,@object # @_Z11addKernel02PiS_S_i .globl _Z11addKernel02PiS_S_i .p2align 3, 0x0 _Z11addKernel02PiS_S_i: .quad _Z26__device_stub__addKernel02PiS_S_i .size _Z11addKernel02PiS_S_i, 8 .type _Z11addKernel03PiS_S_i,@object # @_Z11addKernel03PiS_S_i .globl _Z11addKernel03PiS_S_i .p2align 3, 0x0 _Z11addKernel03PiS_S_i: .quad _Z26__device_stub__addKernel03PiS_S_i .size _Z11addKernel03PiS_S_i, 8 .type _Z11addKernel04PiS_S_i,@object # @_Z11addKernel04PiS_S_i .globl _Z11addKernel04PiS_S_i .p2align 3, 0x0 _Z11addKernel04PiS_S_i: .quad _Z26__device_stub__addKernel04PiS_S_i .size _Z11addKernel04PiS_S_i, 8 .type _Z11addKernel05PiS_S_i,@object # @_Z11addKernel05PiS_S_i .globl _Z11addKernel05PiS_S_i .p2align 3, 0x0 _Z11addKernel05PiS_S_i: .quad _Z26__device_stub__addKernel05PiS_S_i .size _Z11addKernel05PiS_S_i, 8 .type _Z11addKernel06PiS_S_i,@object # @_Z11addKernel06PiS_S_i .globl _Z11addKernel06PiS_S_i .p2align 3, 0x0 _Z11addKernel06PiS_S_i: .quad _Z26__device_stub__addKernel06PiS_S_i .size _Z11addKernel06PiS_S_i, 8 .type _Z11addKernel07PiS_S_i,@object # @_Z11addKernel07PiS_S_i .globl _Z11addKernel07PiS_S_i .p2align 3, 0x0 _Z11addKernel07PiS_S_i: .quad _Z26__device_stub__addKernel07PiS_S_i .size _Z11addKernel07PiS_S_i, 8 .type _Z11addKernel08PiS_S_i,@object # @_Z11addKernel08PiS_S_i .globl _Z11addKernel08PiS_S_i .p2align 3, 0x0 _Z11addKernel08PiS_S_i: .quad _Z26__device_stub__addKernel08PiS_S_i .size _Z11addKernel08PiS_S_i, 8 .type _Z11addKernel09PiS_S_i,@object # @_Z11addKernel09PiS_S_i .globl _Z11addKernel09PiS_S_i .p2align 3, 0x0 _Z11addKernel09PiS_S_i: .quad _Z26__device_stub__addKernel09PiS_S_i .size _Z11addKernel09PiS_S_i, 8 .type _Z11addKernel10PiS_S_i,@object # @_Z11addKernel10PiS_S_i .globl _Z11addKernel10PiS_S_i .p2align 3, 0x0 _Z11addKernel10PiS_S_i: .quad _Z26__device_stub__addKernel10PiS_S_i .size _Z11addKernel10PiS_S_i, 8 .type _Z11addKernel11PiS_S_i,@object # @_Z11addKernel11PiS_S_i .globl _Z11addKernel11PiS_S_i .p2align 3, 0x0 
_Z11addKernel11PiS_S_i: .quad _Z26__device_stub__addKernel11PiS_S_i .size _Z11addKernel11PiS_S_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11addKernel01PiS_S_i" .size .L__unnamed_1, 23 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z11addKernel02PiS_S_i" .size .L__unnamed_2, 23 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z11addKernel03PiS_S_i" .size .L__unnamed_3, 23 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z11addKernel04PiS_S_i" .size .L__unnamed_4, 23 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "_Z11addKernel05PiS_S_i" .size .L__unnamed_5, 23 .type .L__unnamed_6,@object # @5 .L__unnamed_6: .asciz "_Z11addKernel06PiS_S_i" .size .L__unnamed_6, 23 .type .L__unnamed_7,@object # @6 .L__unnamed_7: .asciz "_Z11addKernel07PiS_S_i" .size .L__unnamed_7, 23 .type .L__unnamed_8,@object # @7 .L__unnamed_8: .asciz "_Z11addKernel08PiS_S_i" .size .L__unnamed_8, 23 .type .L__unnamed_9,@object # @8 .L__unnamed_9: .asciz "_Z11addKernel09PiS_S_i" .size .L__unnamed_9, 23 .type .L__unnamed_10,@object # @9 .L__unnamed_10: .asciz "_Z11addKernel10PiS_S_i" .size .L__unnamed_10, 23 .type .L__unnamed_11,@object # @10 .L__unnamed_11: .asciz "_Z11addKernel11PiS_S_i" .size .L__unnamed_11, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__addKernel01PiS_S_i .addrsig_sym _Z26__device_stub__addKernel02PiS_S_i .addrsig_sym _Z26__device_stub__addKernel03PiS_S_i .addrsig_sym _Z26__device_stub__addKernel04PiS_S_i .addrsig_sym _Z26__device_stub__addKernel05PiS_S_i .addrsig_sym _Z26__device_stub__addKernel06PiS_S_i .addrsig_sym _Z26__device_stub__addKernel07PiS_S_i .addrsig_sym _Z26__device_stub__addKernel08PiS_S_i .addrsig_sym _Z26__device_stub__addKernel09PiS_S_i .addrsig_sym _Z26__device_stub__addKernel10PiS_S_i .addrsig_sym _Z26__device_stub__addKernel11PiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11addKernel01PiS_S_i .addrsig_sym _Z11addKernel02PiS_S_i .addrsig_sym _Z11addKernel03PiS_S_i .addrsig_sym _Z11addKernel04PiS_S_i .addrsig_sym _Z11addKernel05PiS_S_i .addrsig_sym _Z11addKernel06PiS_S_i .addrsig_sym _Z11addKernel07PiS_S_i .addrsig_sym _Z11addKernel08PiS_S_i .addrsig_sym _Z11addKernel09PiS_S_i .addrsig_sym _Z11addKernel10PiS_S_i .addrsig_sym _Z11addKernel11PiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
code for sm_80 Function : _Z6gaxpy2PKdS0_Pd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */ /* 0x000fe200000001ff */ /*0030*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */ /* 0x000fe20000000f00 */ /*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0050*/ S2R R5, SR_TID.Z ; /* 0x0000000000057919 */ /* 0x000e280000002300 */ /*0060*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0070*/ IMAD R0, R5, c[0x0][0x4], R0 ; /* 0x0000010005007a24 */ /* 0x001fc800078e0200 */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fe200078e0203 */ /*0090*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */ /* 0x000fc60000000f00 */ /*00a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc600078e0207 */ /*00b0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1b00 */ /*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fc600078e0207 */ /*00d0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*00e0*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000ea4000c1e1b00 */ /*00f0*/ DFMA R8, R2, R4, R8 ; /* 0x000000040208722b */ /* 0x004e0e0000000008 */ /*0100*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */ /* 0x001fe2000c101b04 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
.file "tmpxft_0009717d_00000000-6_gaxpy2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd .type _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd, @function _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6gaxpy2PKdS0_Pd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd, .-_Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd .globl _Z6gaxpy2PKdS0_Pd .type _Z6gaxpy2PKdS0_Pd, @function _Z6gaxpy2PKdS0_Pd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z6gaxpy2PKdS0_Pd, .-_Z6gaxpy2PKdS0_Pd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6gaxpy2PKdS0_Pd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6gaxpy2PKdS0_Pd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
#include <hip/hip_runtime.h> #include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6gaxpy2PKdS0_Pd .globl _Z6gaxpy2PKdS0_Pd .p2align 8 .type _Z6gaxpy2PKdS0_Pd,@function _Z6gaxpy2PKdS0_Pd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 v_bfe_u32 v1, v0, 10, 10 v_bfe_u32 v2, v0, 20, 10 v_and_b32_e32 v0, 0x3ff, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff v_mad_u32_u24 v3, v2, s3, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, v3, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[1:2] v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off global_load_b64 v[4:5], v[0:1], off s_load_b64 s[0:1], s[4:5], 0x0 s_waitcnt vmcnt(0) lgkmcnt(0) v_fma_f64 v[2:3], s[0:1], v[2:3], v[4:5] global_store_b64 v[0:1], v[2:3], off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6gaxpy2PKdS0_Pd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 8 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 
.amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6gaxpy2PKdS0_Pd, .Lfunc_end0-_Z6gaxpy2PKdS0_Pd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 
.kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6gaxpy2PKdS0_Pd .private_segment_fixed_size: 0 .sgpr_count: 10 .sgpr_spill_count: 0 .symbol: _Z6gaxpy2PKdS0_Pd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" extern "C" { } __global__ void gaxpy2(const double *a, const double *b, double *c) { int i = threadIdx.x + threadIdx.y*blockDim.x+threadIdx.z*blockDim.x*blockDim.y; c[i] = a[0]*b[i] + c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
.text .file "gaxpy2.hip" .globl _Z21__device_stub__gaxpy2PKdS0_Pd # -- Begin function _Z21__device_stub__gaxpy2PKdS0_Pd .p2align 4, 0x90 .type _Z21__device_stub__gaxpy2PKdS0_Pd,@function _Z21__device_stub__gaxpy2PKdS0_Pd: # @_Z21__device_stub__gaxpy2PKdS0_Pd .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6gaxpy2PKdS0_Pd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__gaxpy2PKdS0_Pd, .Lfunc_end0-_Z21__device_stub__gaxpy2PKdS0_Pd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6gaxpy2PKdS0_Pd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi 
testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6gaxpy2PKdS0_Pd,@object # @_Z6gaxpy2PKdS0_Pd .section .rodata,"a",@progbits .globl _Z6gaxpy2PKdS0_Pd .p2align 3, 0x0 _Z6gaxpy2PKdS0_Pd: .quad _Z21__device_stub__gaxpy2PKdS0_Pd .size _Z6gaxpy2PKdS0_Pd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6gaxpy2PKdS0_Pd" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__gaxpy2PKdS0_Pd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6gaxpy2PKdS0_Pd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6gaxpy2PKdS0_Pd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */ /* 0x000fe200000001ff */ /*0030*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */ /* 0x000fe20000000f00 */ /*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0050*/ S2R R5, SR_TID.Z ; /* 0x0000000000057919 */ /* 0x000e280000002300 */ /*0060*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0070*/ IMAD R0, R5, c[0x0][0x4], R0 ; /* 0x0000010005007a24 */ /* 0x001fc800078e0200 */ /*0080*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fe200078e0203 */ /*0090*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */ /* 0x000fc60000000f00 */ /*00a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc600078e0207 */ /*00b0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1b00 */ /*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fc600078e0207 */ /*00d0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*00e0*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000ea4000c1e1b00 */ /*00f0*/ DFMA R8, R2, R4, R8 ; /* 0x000000040208722b */ /* 0x004e0e0000000008 */ /*0100*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */ /* 0x001fe2000c101b04 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6gaxpy2PKdS0_Pd .globl _Z6gaxpy2PKdS0_Pd .p2align 8 .type _Z6gaxpy2PKdS0_Pd,@function _Z6gaxpy2PKdS0_Pd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 v_bfe_u32 v1, v0, 10, 10 v_bfe_u32 v2, v0, 20, 10 v_and_b32_e32 v0, 0x3ff, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff v_mad_u32_u24 v3, v2, s3, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, v3, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[1:2] v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off global_load_b64 v[4:5], v[0:1], off s_load_b64 s[0:1], s[4:5], 0x0 s_waitcnt vmcnt(0) lgkmcnt(0) v_fma_f64 v[2:3], s[0:1], v[2:3], v[4:5] global_store_b64 v[0:1], v[2:3], off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6gaxpy2PKdS0_Pd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 8 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 
.amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6gaxpy2PKdS0_Pd, .Lfunc_end0-_Z6gaxpy2PKdS0_Pd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 
.kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6gaxpy2PKdS0_Pd .private_segment_fixed_size: 0 .sgpr_count: 10 .sgpr_spill_count: 0 .symbol: _Z6gaxpy2PKdS0_Pd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0009717d_00000000-6_gaxpy2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd .type _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd, @function _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6gaxpy2PKdS0_Pd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd, .-_Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd .globl _Z6gaxpy2PKdS0_Pd .type _Z6gaxpy2PKdS0_Pd, @function _Z6gaxpy2PKdS0_Pd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z6gaxpy2PKdS0_PdPKdS0_Pd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z6gaxpy2PKdS0_Pd, .-_Z6gaxpy2PKdS0_Pd .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6gaxpy2PKdS0_Pd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6gaxpy2PKdS0_Pd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "gaxpy2.hip" .globl _Z21__device_stub__gaxpy2PKdS0_Pd # -- Begin function _Z21__device_stub__gaxpy2PKdS0_Pd .p2align 4, 0x90 .type _Z21__device_stub__gaxpy2PKdS0_Pd,@function _Z21__device_stub__gaxpy2PKdS0_Pd: # @_Z21__device_stub__gaxpy2PKdS0_Pd .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6gaxpy2PKdS0_Pd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__gaxpy2PKdS0_Pd, .Lfunc_end0-_Z21__device_stub__gaxpy2PKdS0_Pd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6gaxpy2PKdS0_Pd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi 
testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z6gaxpy2PKdS0_Pd,@object # @_Z6gaxpy2PKdS0_Pd .section .rodata,"a",@progbits .globl _Z6gaxpy2PKdS0_Pd .p2align 3, 0x0 _Z6gaxpy2PKdS0_Pd: .quad _Z21__device_stub__gaxpy2PKdS0_Pd .size _Z6gaxpy2PKdS0_Pd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6gaxpy2PKdS0_Pd" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__gaxpy2PKdS0_Pd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6gaxpy2PKdS0_Pd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> const int TILE_DIM = 32; const int BLOCK_ROWS = 8; /** * the non-square CUDA transpose kernel is * writtern By Amir Hossein Bakhtiary, use as you wish. Shouldn't have any copyright problems. */ // http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html __global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols) { __shared__ double tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; // if (x >= cols||y >= rows){ // return; // } int maxJ = TILE_DIM; int maxJ2 = TILE_DIM; int otherMaxJ = rows - y; if (maxJ > otherMaxJ) maxJ = otherMaxJ; if ( x < cols ){ for (int j = 0; j < maxJ; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; int otherMaxJ2 = cols - y; if (maxJ2 > otherMaxJ2){ maxJ2 = otherMaxJ2; } if ( x < rows){ for (int j = 0; j < maxJ2; j += BLOCK_ROWS) odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<columns) { /* Compute first 3 output elements */ out[idx] = B*in[idx]; out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx]; out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]); /* Recursive computation of output in forward direction using filter parameters bf and B */ for(int i=3; i<rows; i++) { out[idx+i*columns] = B*in[idx+i*columns]; for(int j=0; j<3; j++) { out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns]; } } } } __global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; int total = columns*(rows-1); if(idx<columns) { /* Compute last 3 output 
elements */ out[total + idx] = B*in[total + idx]; out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx]; out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]); /* Recursive computation of output in backward direction using filter parameters bb and B */ for (int i=3; i<rows-1; i++) { out[total + idx - i*columns] = B*in[total + idx - i*columns]; for (int j=0; j<3; j++) { out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns]; } } } } extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /** \brief Array to store output of Causal filter convolution */ double *d_input, *d_output, *d_bf, *d_bb; cudaMalloc((void**) &d_input, rows*columns*sizeof(double)); cudaMalloc((void**) &d_output, rows*columns*sizeof(double)); cudaMalloc((void**) &d_bf, rows*columns*sizeof(double)); cudaMalloc((void**) &d_bb, rows*columns*sizeof(double)); cudaMemcpy(d_input, in, rows*columns*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_bf, bf, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_bb, bb, 3*sizeof(double), cudaMemcpyHostToDevice); dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1); dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); // -------- Convolve Rows---------- transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns); cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf); cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb); // -------- Convolve Columns ---------- transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows); cuconvolve_youngCausal<<<columns/256 + 1, 256>>>(d_input, d_output, rows, 
columns, B, d_bf); cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb); cudaMemcpy(in, d_input, rows*columns*sizeof(double), cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Execution time elapsed: %f ms\n", milliseconds); cudaFree(d_input); cudaFree(d_output); cudaFree(d_bf); cudaFree(d_bb); }
.file "tmpxft_0015688f_00000000-6_cuda_young.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii .type _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii, @function _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18transposeCoalescedPdPKdii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii, .-_Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii .globl _Z18transposeCoalescedPdPKdii .type _Z18transposeCoalescedPdPKdii, @function _Z18transposeCoalescedPdPKdii: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z18transposeCoalescedPdPKdii, .-_Z18transposeCoalescedPdPKdii .globl _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ .type _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_, @function _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_: .LFB2084: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movl %edx, 28(%rsp) movl %ecx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 168(%rsp), %rax subq %fs:40, %rax jne .L16 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z22cuconvolve_youngCausalPdS_iidS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_, .-_Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ .globl _Z22cuconvolve_youngCausalPdS_iidS_ .type _Z22cuconvolve_youngCausalPdS_iidS_, @function _Z22cuconvolve_youngCausalPdS_iidS_: .LFB2085: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z22cuconvolve_youngCausalPdS_iidS_, .-_Z22cuconvolve_youngCausalPdS_iidS_ .globl _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ .type _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_, @function _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_: .LFB2086: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movl %edx, 28(%rsp) movl %ecx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 168(%rsp), %rax subq %fs:40, %rax jne .L24 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z26cuconvolve_youngAnticausalPdS_iidS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_, .-_Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ .globl _Z26cuconvolve_youngAnticausalPdS_iidS_ .type _Z26cuconvolve_youngAnticausalPdS_iidS_, @function 
_Z26cuconvolve_youngAnticausalPdS_iidS_: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z26cuconvolve_youngAnticausalPdS_iidS_, .-_Z26cuconvolve_youngAnticausalPdS_iidS_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "Execution time elapsed: %f ms\n" .text .globl cudaYoung .type cudaYoung, @function cudaYoung: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $152, %rsp .cfi_def_cfa_offset 208 movq %rdi, %r14 movl %edx, %ebx movl %ecx, %ebp movq %r8, %r15 movq %r9, %r13 movsd %xmm0, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movl %ebx, %r12d imull %ebp, %r12d movslq %r12d, %r12 salq $3, %r12 leaq 40(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 64(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r12, %rdx movq %r14, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $24, %edx movq %r15, %rsi movq 56(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $24, %edx movq %r13, %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT leal 62(%rbx), %edx movl %ebx, %eax addl $31, %eax cmovns %eax, %edx sarl $5, %edx leal 62(%rbp), %eax movl %ebp, %ecx addl $31, %ecx cmovns %ecx, %eax sarl $5, %eax movl %eax, 76(%rsp) movl %edx, 80(%rsp) movl %edx, 88(%rsp) movl %eax, 92(%rsp) movl $1, 
96(%rsp) movl $32, 100(%rsp) movl $8, 104(%rsp) movl $1, 108(%rsp) movl $0, %r9d movl $0, %r8d movq 100(%rsp), %rdx movl $1, %ecx movq 76(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L28: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) leal 255(%rbx), %r13d testl %ebx, %ebx cmovns %ebx, %r13d sarl $8, %r13d addl $1, %r13d movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L29: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L38 .L30: movl 108(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 100(%rsp), %rdx movq 88(%rsp), %rdi movl 96(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L31: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) leal 255(%rbp), %r13d testl %ebp, %ebp cmovns %ebp, %r13d sarl $8, %r13d addl $1, %r13d movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L40 .L32: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L41 .L33: movl $2, %ecx movq %r12, %rdx movq 40(%rsp), %rsi movq %r14, %rdi call cudaMemcpy@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 124(%rsp) leaq 124(%rsp), %rdi movq 32(%rsp), %rdx movq 
24(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 124(%rsp), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L42 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movl %ebp, %ecx movl %ebx, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii jmp .L28 .L37: movq 56(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebx, %ecx movl %ebp, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ jmp .L29 .L38: movq 64(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebx, %ecx movl %ebp, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ jmp .L30 .L39: movl %ebx, %ecx movl %ebp, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii jmp .L31 .L40: movq 56(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebp, %ecx movl %ebx, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ jmp .L32 .L41: movq 64(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebp, %ecx movl %ebx, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ jmp .L33 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size cudaYoung, .-cudaYoung .section .rodata.str1.8 .align 8 .LC2: .string "_Z26cuconvolve_youngAnticausalPdS_iidS_" .align 8 .LC3: .string "_Z22cuconvolve_youngCausalPdS_iidS_" .section .rodata.str1.1,"aMS",@progbits,1 
.LC4: .string "_Z18transposeCoalescedPdPKdii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2089: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z26cuconvolve_youngAnticausalPdS_iidS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z22cuconvolve_youngCausalPdS_iidS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z18transposeCoalescedPdPKdii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" 
.section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> const int TILE_DIM = 32; const int BLOCK_ROWS = 8; /** * the non-square CUDA transpose kernel is * writtern By Amir Hossein Bakhtiary, use as you wish. Shouldn't have any copyright problems. */ // http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html __global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols) { __shared__ double tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; // if (x >= cols||y >= rows){ // return; // } int maxJ = TILE_DIM; int maxJ2 = TILE_DIM; int otherMaxJ = rows - y; if (maxJ > otherMaxJ) maxJ = otherMaxJ; if ( x < cols ){ for (int j = 0; j < maxJ; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; int otherMaxJ2 = cols - y; if (maxJ2 > otherMaxJ2){ maxJ2 = otherMaxJ2; } if ( x < rows){ for (int j = 0; j < maxJ2; j += BLOCK_ROWS) odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<columns) { /* Compute first 3 output elements */ out[idx] = B*in[idx]; out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx]; out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]); /* Recursive computation of output in forward direction using filter parameters bf and B */ for(int i=3; i<rows; i++) { out[idx+i*columns] = B*in[idx+i*columns]; for(int j=0; j<3; j++) { out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns]; } } } } __global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; int total = columns*(rows-1); if(idx<columns) { /* Compute last 3 output 
elements */ out[total + idx] = B*in[total + idx]; out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx]; out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]); /* Recursive computation of output in backward direction using filter parameters bb and B */ for (int i=3; i<rows-1; i++) { out[total + idx - i*columns] = B*in[total + idx - i*columns]; for (int j=0; j<3; j++) { out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns]; } } } } extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /** \brief Array to store output of Causal filter convolution */ double *d_input, *d_output, *d_bf, *d_bb; cudaMalloc((void**) &d_input, rows*columns*sizeof(double)); cudaMalloc((void**) &d_output, rows*columns*sizeof(double)); cudaMalloc((void**) &d_bf, rows*columns*sizeof(double)); cudaMalloc((void**) &d_bb, rows*columns*sizeof(double)); cudaMemcpy(d_input, in, rows*columns*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_bf, bf, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_bb, bb, 3*sizeof(double), cudaMemcpyHostToDevice); dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1); dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); // -------- Convolve Rows---------- transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns); cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf); cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb); // -------- Convolve Columns ---------- transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows); cuconvolve_youngCausal<<<columns/256 + 1, 256>>>(d_input, d_output, rows, 
columns, B, d_bf); cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb); cudaMemcpy(in, d_input, rows*columns*sizeof(double), cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Execution time elapsed: %f ms\n", milliseconds); cudaFree(d_input); cudaFree(d_output); cudaFree(d_bf); cudaFree(d_bb); }
#include <hip/hip_runtime.h> #include <stdio.h> const int TILE_DIM = 32; const int BLOCK_ROWS = 8; /** * the non-square CUDA transpose kernel is * writtern By Amir Hossein Bakhtiary, use as you wish. Shouldn't have any copyright problems. */ // http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html __global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols) { __shared__ double tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; // if (x >= cols||y >= rows){ // return; // } int maxJ = TILE_DIM; int maxJ2 = TILE_DIM; int otherMaxJ = rows - y; if (maxJ > otherMaxJ) maxJ = otherMaxJ; if ( x < cols ){ for (int j = 0; j < maxJ; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; int otherMaxJ2 = cols - y; if (maxJ2 > otherMaxJ2){ maxJ2 = otherMaxJ2; } if ( x < rows){ for (int j = 0; j < maxJ2; j += BLOCK_ROWS) odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<columns) { /* Compute first 3 output elements */ out[idx] = B*in[idx]; out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx]; out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]); /* Recursive computation of output in forward direction using filter parameters bf and B */ for(int i=3; i<rows; i++) { out[idx+i*columns] = B*in[idx+i*columns]; for(int j=0; j<3; j++) { out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns]; } } } } __global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; int total = columns*(rows-1); 
if(idx<columns) { /* Compute last 3 output elements */ out[total + idx] = B*in[total + idx]; out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx]; out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]); /* Recursive computation of output in backward direction using filter parameters bb and B */ for (int i=3; i<rows-1; i++) { out[total + idx - i*columns] = B*in[total + idx - i*columns]; for (int j=0; j<3; j++) { out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns]; } } } } extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /** \brief Array to store output of Causal filter convolution */ double *d_input, *d_output, *d_bf, *d_bb; hipMalloc((void**) &d_input, rows*columns*sizeof(double)); hipMalloc((void**) &d_output, rows*columns*sizeof(double)); hipMalloc((void**) &d_bf, rows*columns*sizeof(double)); hipMalloc((void**) &d_bb, rows*columns*sizeof(double)); hipMemcpy(d_input, in, rows*columns*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bf, bf, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bb, bb, 3*sizeof(double), hipMemcpyHostToDevice); dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1); dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); // -------- Convolve Rows---------- transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns); cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf); cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb); // -------- Convolve Columns ---------- transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows); cuconvolve_youngCausal<<<columns/256 + 1, 
256>>>(d_input, d_output, rows, columns, B, d_bf); cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb); hipMemcpy(in, d_input, rows*columns*sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Execution time elapsed: %f ms\n", milliseconds); hipFree(d_input); hipFree(d_output); hipFree(d_bf); hipFree(d_bb); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> const int TILE_DIM = 32; const int BLOCK_ROWS = 8; /** * the non-square CUDA transpose kernel is * writtern By Amir Hossein Bakhtiary, use as you wish. Shouldn't have any copyright problems. */ // http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html __global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols) { __shared__ double tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; // if (x >= cols||y >= rows){ // return; // } int maxJ = TILE_DIM; int maxJ2 = TILE_DIM; int otherMaxJ = rows - y; if (maxJ > otherMaxJ) maxJ = otherMaxJ; if ( x < cols ){ for (int j = 0; j < maxJ; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; int otherMaxJ2 = cols - y; if (maxJ2 > otherMaxJ2){ maxJ2 = otherMaxJ2; } if ( x < rows){ for (int j = 0; j < maxJ2; j += BLOCK_ROWS) odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<columns) { /* Compute first 3 output elements */ out[idx] = B*in[idx]; out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx]; out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]); /* Recursive computation of output in forward direction using filter parameters bf and B */ for(int i=3; i<rows; i++) { out[idx+i*columns] = B*in[idx+i*columns]; for(int j=0; j<3; j++) { out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns]; } } } } __global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; int total = columns*(rows-1); 
if(idx<columns) { /* Compute last 3 output elements */ out[total + idx] = B*in[total + idx]; out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx]; out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]); /* Recursive computation of output in backward direction using filter parameters bb and B */ for (int i=3; i<rows-1; i++) { out[total + idx - i*columns] = B*in[total + idx - i*columns]; for (int j=0; j<3; j++) { out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns]; } } } } extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /** \brief Array to store output of Causal filter convolution */ double *d_input, *d_output, *d_bf, *d_bb; hipMalloc((void**) &d_input, rows*columns*sizeof(double)); hipMalloc((void**) &d_output, rows*columns*sizeof(double)); hipMalloc((void**) &d_bf, rows*columns*sizeof(double)); hipMalloc((void**) &d_bb, rows*columns*sizeof(double)); hipMemcpy(d_input, in, rows*columns*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bf, bf, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bb, bb, 3*sizeof(double), hipMemcpyHostToDevice); dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1); dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); // -------- Convolve Rows---------- transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns); cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf); cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb); // -------- Convolve Columns ---------- transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows); cuconvolve_youngCausal<<<columns/256 + 1, 
256>>>(d_input, d_output, rows, columns, B, d_bf); cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb); hipMemcpy(in, d_input, rows*columns*sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Execution time elapsed: %f ms\n", milliseconds); hipFree(d_input); hipFree(d_output); hipFree(d_bf); hipFree(d_bb); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18transposeCoalescedPdPKdii .globl _Z18transposeCoalescedPdPKdii .p2align 8 .type _Z18transposeCoalescedPdPKdii,@function _Z18transposeCoalescedPdPKdii: s_load_b64 s[4:5], s[0:1], 0x10 v_bfe_u32 v3, v0, 10, 10 s_lshl_b32 s6, s15, 5 v_and_b32_e32 v2, 0x3ff, v0 s_lshl_b32 s7, s14, 5 s_mov_b32 s8, 0 v_add_nc_u32_e32 v0, s6, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v4, s7, v2 s_waitcnt lgkmcnt(0) v_sub_nc_u32_e32 v1, s4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v4 v_cmp_lt_i32_e64 s2, 0, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s9, s2 s_cbranch_execz .LBB0_3 s_load_b64 s[2:3], s[0:1], 0x8 v_mul_lo_u32 v0, s5, v0 v_lshlrev_b32_e32 v5, 3, v2 v_min_i32_e32 v4, 32, v1 s_lshl_b32 s10, s5, 3 s_mov_b32 s11, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mad_u32_u24 v5, v3, 0x108, v5 v_add3_u32 v0, v2, v0, s7 .p2align 6 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 s_add_i32 s11, s11, 8 v_lshlrev_b64 v[6:7], 3, v[0:1] v_add_nc_u32_e32 v0, s10, v0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v6, vcc_lo, s2, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo v_cmp_ge_i32_e32 vcc_lo, s11, v4 global_load_b64 v[6:7], v[6:7], off s_or_b32 s8, vcc_lo, s8 s_waitcnt vmcnt(0) ds_store_b64 v5, v[6:7] v_add_nc_u32_e32 v5, 0x840, v5 s_and_not1_b32 exec_lo, exec_lo, s8 s_cbranch_execnz .LBB0_2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s9 v_add_nc_u32_e32 v1, s7, v3 v_add_nc_u32_e32 v4, s6, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_sub_nc_u32_e32 v0, s5, v1 v_cmp_gt_i32_e32 vcc_lo, s4, v4 s_mov_b32 s5, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) 
v_cmp_lt_i32_e64 s2, 0, v0 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_6 s_load_b64 s[2:3], s[0:1], 0x0 v_mul_lo_u32 v1, s4, v1 v_lshlrev_b32_e32 v4, 3, v3 v_min_i32_e32 v3, 32, v0 s_lshl_b32 s1, s4, 3 s_mov_b32 s4, 0 s_delay_alu instid0(VALU_DEP_3) v_add3_u32 v0, v2, v1, s6 v_mad_u32_u24 v2, v2, 0x108, v4 .p2align 6 .LBB0_5: ds_load_b64 v[4:5], v2 v_ashrrev_i32_e32 v1, 31, v0 s_add_i32 s4, s4, 8 v_add_nc_u32_e32 v2, 64, v2 v_cmp_ge_i32_e32 vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_lshlrev_b64 v[6:7], 3, v[0:1] v_add_nc_u32_e32 v0, s1, v0 s_or_b32 s5, vcc_lo, s5 s_waitcnt lgkmcnt(0) v_add_co_u32 v6, s0, s2, v6 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v7, s0, s3, v7, s0 global_store_b64 v[6:7], v[4:5], off s_and_not1_b32 exec_lo, exec_lo, s5 s_cbranch_execnz .LBB0_5 .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18transposeCoalescedPdPKdii .amdhsa_group_segment_fixed_size 8448 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18transposeCoalescedPdPKdii, .Lfunc_end0-_Z18transposeCoalescedPdPKdii .section .AMDGPU.csdata,"",@progbits .text .protected _Z22cuconvolve_youngCausalPdS_iidS_ .globl _Z22cuconvolve_youngCausalPdS_iidS_ .p2align 8 .type _Z22cuconvolve_youngCausalPdS_iidS_,@function _Z22cuconvolve_youngCausalPdS_iidS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s10, s[0:1], 0x14 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s10, v1 s_cbranch_execz .LBB1_5 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b32 s11, s[0:1], 0x10 v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v9, s10, v1 s_load_b128 s[0:3], s[0:1], 0x18 v_mov_b32_e32 v0, 0 s_mov_b32 s12, 3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] v_mov_b32_e32 v10, v2 v_lshlrev_b64 v[9:10], 3, v[9:10] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v3, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo global_load_b64 v[5:6], v[5:6], off v_add_co_u32 v11, vcc_lo, s4, v9 v_add_co_ci_u32_e32 v12, vcc_lo, s5, v10, vcc_lo s_cmp_lt_i32 s11, 4 s_waitcnt vmcnt(0) v_mul_f64 v[5:6], v[5:6], s[0:1] global_store_b64 v[3:4], v[5:6], off global_load_b64 v[7:8], v0, s[2:3] offset:16 global_load_b64 v[11:12], v[11:12], off s_waitcnt vmcnt(1) v_mul_f64 v[5:6], v[5:6], v[7:8] v_add_co_u32 v7, vcc_lo, s6, v9 v_add_co_ci_u32_e32 v8, vcc_lo, s7, v10, vcc_lo s_waitcnt vmcnt(0) s_delay_alu 
instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], v[11:12], s[0:1], v[5:6] v_lshl_add_u32 v11, s10, 1, v1 v_mov_b32_e32 v12, v2 v_lshlrev_b64 v[11:12], 3, v[11:12] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v13, vcc_lo, s4, v11 v_add_co_ci_u32_e32 v14, vcc_lo, s5, v12, vcc_lo global_store_b64 v[7:8], v[5:6], off s_clause 0x1 global_load_b64 v[7:8], v0, s[2:3] offset:16 global_load_b64 v[9:10], v0, s[2:3] offset:8 global_load_b64 v[3:4], v[3:4], off global_load_b64 v[13:14], v[13:14], off s_waitcnt vmcnt(3) v_mul_f64 v[5:6], v[5:6], v[7:8] s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_fma_f64 v[2:3], v[9:10], v[3:4], v[5:6] v_add_co_u32 v4, vcc_lo, s6, v11 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v12, vcc_lo s_waitcnt vmcnt(0) v_fma_f64 v[2:3], v[13:14], s[0:1], v[2:3] global_store_b64 v[4:5], v[2:3], off s_cbranch_scc1 .LBB1_5 .p2align 6 .LBB1_2: v_mad_u64_u32 v[2:3], null, s12, s10, v[1:2] v_mov_b32_e32 v3, v0 s_mov_b32 s13, 0 s_mov_b64 s[8:9], s[2:3] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 3, v[2:3] v_add_co_u32 v2, vcc_lo, s4, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo global_load_b64 v[2:3], v[2:3], off s_waitcnt vmcnt(0) v_mul_f64 v[2:3], v[2:3], s[0:1] global_store_b64 v[4:5], v[2:3], off .p2align 6 .LBB1_3: s_xor_b32 s14, s13, 3 s_add_i32 s13, s13, 1 s_sub_i32 s14, s12, s14 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[6:7], null, s14, s10, v[1:2] v_mov_b32_e32 v7, v0 v_lshlrev_b64 v[6:7], 3, v[6:7] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v6, vcc_lo, s6, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo global_load_b64 v[8:9], v0, s[8:9] global_load_b64 v[6:7], v[6:7], off s_add_u32 s8, s8, 8 
s_addc_u32 s9, s9, 0 s_cmp_eq_u32 s13, 3 s_waitcnt vmcnt(0) v_fma_f64 v[2:3], v[8:9], v[6:7], v[2:3] global_store_b64 v[4:5], v[2:3], off s_cbranch_scc0 .LBB1_3 s_add_i32 s12, s12, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s12, s11 s_cbranch_scc0 .LBB1_2 .LBB1_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22cuconvolve_youngCausalPdS_iidS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z22cuconvolve_youngCausalPdS_iidS_, .Lfunc_end1-_Z22cuconvolve_youngCausalPdS_iidS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z26cuconvolve_youngAnticausalPdS_iidS_ .globl _Z26cuconvolve_youngAnticausalPdS_iidS_ .p2align 8 .type _Z26cuconvolve_youngAnticausalPdS_iidS_,@function _Z26cuconvolve_youngAnticausalPdS_iidS_: s_clause 0x1 
s_load_b32 s2, s[0:1], 0x34 s_load_b32 s10, s[0:1], 0x14 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_mul_i32 s15, s15, s2 s_mov_b32 s2, exec_lo v_add_nc_u32_e32 v3, s15, v0 v_cmpx_gt_u32_e64 s10, v3 s_cbranch_execz .LBB2_6 s_clause 0x2 s_load_b32 s8, s[0:1], 0x10 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b128 s[0:3], s[0:1], 0x18 s_lshl_b32 s9, s10, 1 s_mov_b32 s12, 3 s_waitcnt lgkmcnt(0) s_add_i32 s11, s8, -1 s_cmp_lt_i32 s8, 5 v_mad_u64_u32 v[1:2], null, s11, s10, v[3:4] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 3, v[1:2] v_subrev_nc_u32_e32 v12, s10, v1 v_add_co_u32 v5, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v8, vcc_lo, s6, v3 v_mov_b32_e32 v3, 0 global_load_b64 v[5:6], v[5:6], off v_add_co_ci_u32_e32 v9, vcc_lo, s7, v4, vcc_lo v_mov_b32_e32 v13, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[12:13], 3, v[12:13] v_add_co_u32 v14, vcc_lo, s4, v12 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v15, vcc_lo, s5, v13, vcc_lo s_waitcnt vmcnt(0) v_mul_f64 v[5:6], v[5:6], s[0:1] global_store_b64 v[8:9], v[5:6], off global_load_b64 v[10:11], v3, s[2:3] global_load_b64 v[14:15], v[14:15], off s_waitcnt vmcnt(1) v_mul_f64 v[4:5], v[5:6], v[10:11] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_fma_f64 v[10:11], v[14:15], s[0:1], v[4:5] v_add_co_u32 v4, vcc_lo, s6, v12 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v13, vcc_lo v_subrev_nc_u32_e32 v12, s9, v1 v_mov_b32_e32 v13, v2 v_lshlrev_b64 v[12:13], 3, v[12:13] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v14, vcc_lo, s4, v12 v_add_co_ci_u32_e32 v15, vcc_lo, s5, v13, vcc_lo global_store_b64 v[4:5], v[10:11], off global_load_b128 v[4:7], v3, s[2:3] global_load_b64 v[8:9], v[8:9], off 
global_load_b64 v[14:15], v[14:15], off s_waitcnt vmcnt(1) v_mul_f64 v[6:7], v[6:7], v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_fma_f64 v[4:5], v[10:11], v[4:5], v[6:7] v_add_co_u32 v6, vcc_lo, s6, v12 v_add_co_ci_u32_e32 v7, vcc_lo, s7, v13, vcc_lo s_waitcnt vmcnt(0) v_fma_f64 v[4:5], v[14:15], s[0:1], v[4:5] global_store_b64 v[6:7], v[4:5], off s_cbranch_scc1 .LBB2_6 s_add_i32 s8, s8, -3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s8, s10, s8 v_add3_u32 v0, s15, s8, v0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB2_3: s_mul_i32 s8, s12, s10 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v2, s8, v1 s_mov_b64 s[8:9], 0 v_lshlrev_b64 v[6:7], 3, v[2:3] v_mov_b32_e32 v2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v4, vcc_lo, s4, v6 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v7, vcc_lo v_add_co_u32 v6, vcc_lo, s6, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo global_load_b64 v[4:5], v[4:5], off s_waitcnt vmcnt(0) v_mul_f64 v[4:5], v[4:5], s[0:1] global_store_b64 v[6:7], v[4:5], off .p2align 6 .LBB2_4: v_lshlrev_b64 v[8:9], 3, v[2:3] s_add_u32 s14, s2, s8 s_addc_u32 s15, s3, s9 v_add_nc_u32_e32 v2, s10, v2 s_add_u32 s8, s8, 8 s_addc_u32 s9, s9, 0 v_add_co_u32 v8, vcc_lo, s6, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo s_cmp_eq_u32 s8, 24 global_load_b64 v[10:11], v3, s[14:15] global_load_b64 v[8:9], v[8:9], off s_waitcnt vmcnt(0) v_fma_f64 v[4:5], v[10:11], v[8:9], v[4:5] global_store_b64 v[6:7], v[4:5], off s_cbranch_scc0 .LBB2_4 v_subrev_nc_u32_e32 v0, s10, v0 s_add_i32 s12, s12, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s12, s11 s_cbranch_scc0 .LBB2_3 .LBB2_6: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z26cuconvolve_youngAnticausalPdS_iidS_ .amdhsa_group_segment_fixed_size 0 
.amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 16 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z26cuconvolve_youngAnticausalPdS_iidS_, .Lfunc_end2-_Z26cuconvolve_youngAnticausalPdS_iidS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value 
.group_segment_fixed_size: 8448 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18transposeCoalescedPdPKdii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18transposeCoalescedPdPKdii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22cuconvolve_youngCausalPdS_iidS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22cuconvolve_youngCausalPdS_iidS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 
.workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26cuconvolve_youngAnticausalPdS_iidS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26cuconvolve_youngAnticausalPdS_iidS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 16 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> const int TILE_DIM = 32; const int BLOCK_ROWS = 8; /** * the non-square CUDA transpose kernel is * writtern By Amir Hossein Bakhtiary, use as you wish. Shouldn't have any copyright problems. */ // http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html __global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols) { __shared__ double tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; // if (x >= cols||y >= rows){ // return; // } int maxJ = TILE_DIM; int maxJ2 = TILE_DIM; int otherMaxJ = rows - y; if (maxJ > otherMaxJ) maxJ = otherMaxJ; if ( x < cols ){ for (int j = 0; j < maxJ; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; int otherMaxJ2 = cols - y; if (maxJ2 > otherMaxJ2){ maxJ2 = otherMaxJ2; } if ( x < rows){ for (int j = 0; j < maxJ2; j += BLOCK_ROWS) odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx<columns) { /* Compute first 3 output elements */ out[idx] = B*in[idx]; out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx]; out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]); /* Recursive computation of output in forward direction using filter parameters bf and B */ for(int i=3; i<rows; i++) { out[idx+i*columns] = B*in[idx+i*columns]; for(int j=0; j<3; j++) { out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns]; } } } } __global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; int total = columns*(rows-1); 
if(idx<columns) { /* Compute last 3 output elements */ out[total + idx] = B*in[total + idx]; out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx]; out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]); /* Recursive computation of output in backward direction using filter parameters bb and B */ for (int i=3; i<rows-1; i++) { out[total + idx - i*columns] = B*in[total + idx - i*columns]; for (int j=0; j<3; j++) { out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns]; } } } } extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /** \brief Array to store output of Causal filter convolution */ double *d_input, *d_output, *d_bf, *d_bb; hipMalloc((void**) &d_input, rows*columns*sizeof(double)); hipMalloc((void**) &d_output, rows*columns*sizeof(double)); hipMalloc((void**) &d_bf, rows*columns*sizeof(double)); hipMalloc((void**) &d_bb, rows*columns*sizeof(double)); hipMemcpy(d_input, in, rows*columns*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bf, bf, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_bb, bb, 3*sizeof(double), hipMemcpyHostToDevice); dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1); dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); // -------- Convolve Rows---------- transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns); cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf); cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb); // -------- Convolve Columns ---------- transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows); cuconvolve_youngCausal<<<columns/256 + 1, 
256>>>(d_input, d_output, rows, columns, B, d_bf); cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb); hipMemcpy(in, d_input, rows*columns*sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Execution time elapsed: %f ms\n", milliseconds); hipFree(d_input); hipFree(d_output); hipFree(d_bf); hipFree(d_bb); }
.text .file "cuda_young.hip" .globl _Z33__device_stub__transposeCoalescedPdPKdii # -- Begin function _Z33__device_stub__transposeCoalescedPdPKdii .p2align 4, 0x90 .type _Z33__device_stub__transposeCoalescedPdPKdii,@function _Z33__device_stub__transposeCoalescedPdPKdii: # @_Z33__device_stub__transposeCoalescedPdPKdii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z33__device_stub__transposeCoalescedPdPKdii, .Lfunc_end0-_Z33__device_stub__transposeCoalescedPdPKdii .cfi_endproc # -- End function .globl _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ # -- Begin function _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .p2align 4, 0x90 .type _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_,@function _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_: # @_Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movsd %xmm0, 72(%rsp) movq %r8, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 64(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), 
%rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end1: .size _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_, .Lfunc_end1-_Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .cfi_endproc # -- End function .globl _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ # -- Begin function _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .p2align 4, 0x90 .type _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_,@function _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_: # @_Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movsd %xmm0, 72(%rsp) movq %r8, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 64(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end2: .size _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_, .Lfunc_end2-_Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .cfi_endproc # -- End function .globl cudaYoung # -- Begin function cudaYoung .p2align 4, 0x90 .type cudaYoung,@function cudaYoung: # @cudaYoung .cfi_startproc # %bb.0: pushq 
%rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movsd %xmm0, 168(%rsp) # 8-byte Spill movq %r9, %r13 movq %r8, %rbx movl %ecx, %r15d movl %edx, %r12d movq %rdi, %rbp leaq 200(%rsp), %rdi callq hipEventCreate leaq 192(%rsp), %rdi callq hipEventCreate movq 200(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl %r15d, %eax imull %r12d, %eax movslq %eax, %r14 shlq $3, %r14 leaq 96(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 104(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 184(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 176(%rsp), %rdi movq %r14, %rsi callq hipMalloc movq 96(%rsp), %rdi movq %rbp, 208(%rsp) # 8-byte Spill movq %rbp, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy movq 184(%rsp), %rdi movl $24, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 176(%rsp), %rdi movl $24, %edx movq %r13, %rsi movl $1, %ecx callq hipMemcpy leal 31(%r15), %eax leal 62(%r15), %ecx testl %eax, %eax cmovnsl %eax, %ecx sarl $5, %ecx leal 31(%r12), %eax leal 62(%r12), %r13d testl %eax, %eax cmovnsl %eax, %r13d sarl $5, %r13d shlq $32, %r13 orq %rcx, %r13 movq %r13, %rdi movl $1, %esi movabsq $34359738400, %rdx # imm = 0x800000020 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 24(%rsp) movl %r15d, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration 
movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: leal 255(%r12), %eax testl %r12d, %r12d cmovnsl %r12d, %eax movabsq $4294967552, %rbp # imm = 0x100000100 sarl $8, %eax incl %eax leaq (%rax,%rbp), %rbx addq $-256, %rbx movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq 184(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 12(%rsp) movl %r12d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: rolq $32, %r13 movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_6 # %bb.5: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq 176(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 12(%rsp) movl %r12d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 
80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_6: movq %r13, %rdi movl $1, %esi movabsq $34359738400, %rdx # imm = 0x800000020 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_8 # %bb.7: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 24(%rsp) movl %r12d, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_8: leal 255(%r15), %eax testl %r15d, %r15d cmovnsl %r15d, %eax sarl $8, %eax incl %eax leaq (%rax,%rbp), %rbx addq $-256, %rbx movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_10 # %bb.9: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq 184(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 12(%rsp) movl %r15d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero 
movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_10: movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_12 # %bb.11: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq 176(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 12(%rsp) movl %r15d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_12: movq 96(%rsp), %rsi movq 208(%rsp), %rdi # 8-byte Reload movq %r14, %rdx movl $2, %ecx callq hipMemcpy movq 192(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 192(%rsp), %rdi callq hipEventSynchronize movl $0, 
112(%rsp) movq 200(%rsp), %rsi movq 192(%rsp), %rdx leaq 112(%rsp), %rdi callq hipEventElapsedTime movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 96(%rsp), %rdi callq hipFree movq 104(%rsp), %rdi callq hipFree movq 184(%rsp), %rdi callq hipFree movq 176(%rsp), %rdi callq hipFree addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size cudaYoung, .Lfunc_end3-cudaYoung .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18transposeCoalescedPdPKdii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22cuconvolve_youngCausalPdS_iidS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size 
__hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z18transposeCoalescedPdPKdii,@object # @_Z18transposeCoalescedPdPKdii .section .rodata,"a",@progbits .globl _Z18transposeCoalescedPdPKdii .p2align 3, 0x0 _Z18transposeCoalescedPdPKdii: .quad _Z33__device_stub__transposeCoalescedPdPKdii .size _Z18transposeCoalescedPdPKdii, 8 .type _Z22cuconvolve_youngCausalPdS_iidS_,@object # @_Z22cuconvolve_youngCausalPdS_iidS_ .globl _Z22cuconvolve_youngCausalPdS_iidS_ .p2align 3, 0x0 _Z22cuconvolve_youngCausalPdS_iidS_: .quad _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .size _Z22cuconvolve_youngCausalPdS_iidS_, 8 .type _Z26cuconvolve_youngAnticausalPdS_iidS_,@object # @_Z26cuconvolve_youngAnticausalPdS_iidS_ .globl _Z26cuconvolve_youngAnticausalPdS_iidS_ .p2align 3, 0x0 _Z26cuconvolve_youngAnticausalPdS_iidS_: .quad _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .size _Z26cuconvolve_youngAnticausalPdS_iidS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Execution time elapsed: %f ms\n" .size .L.str, 31 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18transposeCoalescedPdPKdii" .size .L__unnamed_1, 30 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z22cuconvolve_youngCausalPdS_iidS_" .size .L__unnamed_2, 36 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z26cuconvolve_youngAnticausalPdS_iidS_" .size .L__unnamed_3, 40 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section 
.hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__transposeCoalescedPdPKdii .addrsig_sym _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .addrsig_sym _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18transposeCoalescedPdPKdii .addrsig_sym _Z22cuconvolve_youngCausalPdS_iidS_ .addrsig_sym _Z26cuconvolve_youngAnticausalPdS_iidS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0015688f_00000000-6_cuda_young.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii .type _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii, @function _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18transposeCoalescedPdPKdii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii, .-_Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii .globl _Z18transposeCoalescedPdPKdii .type _Z18transposeCoalescedPdPKdii, @function _Z18transposeCoalescedPdPKdii: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z18transposeCoalescedPdPKdii, .-_Z18transposeCoalescedPdPKdii .globl _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ .type _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_, @function _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_: .LFB2084: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movl %edx, 28(%rsp) movl %ecx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 168(%rsp), %rax subq %fs:40, %rax jne .L16 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z22cuconvolve_youngCausalPdS_iidS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_, .-_Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ .globl _Z22cuconvolve_youngCausalPdS_iidS_ .type _Z22cuconvolve_youngCausalPdS_iidS_, @function _Z22cuconvolve_youngCausalPdS_iidS_: .LFB2085: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z22cuconvolve_youngCausalPdS_iidS_, .-_Z22cuconvolve_youngCausalPdS_iidS_ .globl _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ .type _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_, @function _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_: .LFB2086: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movl %edx, 28(%rsp) movl %ecx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 168(%rsp), %rax subq %fs:40, %rax jne .L24 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z26cuconvolve_youngAnticausalPdS_iidS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_, .-_Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ .globl _Z26cuconvolve_youngAnticausalPdS_iidS_ .type _Z26cuconvolve_youngAnticausalPdS_iidS_, @function 
_Z26cuconvolve_youngAnticausalPdS_iidS_: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z26cuconvolve_youngAnticausalPdS_iidS_, .-_Z26cuconvolve_youngAnticausalPdS_iidS_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "Execution time elapsed: %f ms\n" .text .globl cudaYoung .type cudaYoung, @function cudaYoung: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $152, %rsp .cfi_def_cfa_offset 208 movq %rdi, %r14 movl %edx, %ebx movl %ecx, %ebp movq %r8, %r15 movq %r9, %r13 movsd %xmm0, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movl %ebx, %r12d imull %ebp, %r12d movslq %r12d, %r12 salq $3, %r12 leaq 40(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT leaq 64(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r12, %rdx movq %r14, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $24, %edx movq %r15, %rsi movq 56(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $24, %edx movq %r13, %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT leal 62(%rbx), %edx movl %ebx, %eax addl $31, %eax cmovns %eax, %edx sarl $5, %edx leal 62(%rbp), %eax movl %ebp, %ecx addl $31, %ecx cmovns %ecx, %eax sarl $5, %eax movl %eax, 76(%rsp) movl %edx, 80(%rsp) movl %edx, 88(%rsp) movl %eax, 92(%rsp) movl $1, 
96(%rsp) movl $32, 100(%rsp) movl $8, 104(%rsp) movl $1, 108(%rsp) movl $0, %r9d movl $0, %r8d movq 100(%rsp), %rdx movl $1, %ecx movq 76(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L28: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) leal 255(%rbx), %r13d testl %ebx, %ebx cmovns %ebx, %r13d sarl $8, %r13d addl $1, %r13d movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L29: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L38 .L30: movl 108(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 100(%rsp), %rdx movq 88(%rsp), %rdi movl 96(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L31: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) leal 255(%rbp), %r13d testl %ebp, %ebp cmovns %ebp, %r13d sarl $8, %r13d addl $1, %r13d movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L40 .L32: movl $256, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl %r13d, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movl $0, %r9d movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L41 .L33: movl $2, %ecx movq %r12, %rdx movq 40(%rsp), %rsi movq %r14, %rdi call cudaMemcpy@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 124(%rsp) leaq 124(%rsp), %rdi movq 32(%rsp), %rdx movq 
24(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 124(%rsp), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L42 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movl %ebp, %ecx movl %ebx, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii jmp .L28 .L37: movq 56(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebx, %ecx movl %ebp, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ jmp .L29 .L38: movq 64(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebx, %ecx movl %ebp, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ jmp .L30 .L39: movl %ebx, %ecx movl %ebp, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z43__device_stub__Z18transposeCoalescedPdPKdiiPdPKdii jmp .L31 .L40: movq 56(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebp, %ecx movl %ebx, %edx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z49__device_stub__Z22cuconvolve_youngCausalPdS_iidS_PdS_iidS_ jmp .L32 .L41: movq 64(%rsp), %r8 movsd 8(%rsp), %xmm0 movl %ebp, %ecx movl %ebx, %edx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z53__device_stub__Z26cuconvolve_youngAnticausalPdS_iidS_PdS_iidS_ jmp .L33 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size cudaYoung, .-cudaYoung .section .rodata.str1.8 .align 8 .LC2: .string "_Z26cuconvolve_youngAnticausalPdS_iidS_" .align 8 .LC3: .string "_Z22cuconvolve_youngCausalPdS_iidS_" .section .rodata.str1.1,"aMS",@progbits,1 
.LC4: .string "_Z18transposeCoalescedPdPKdii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2089: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z26cuconvolve_youngAnticausalPdS_iidS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z22cuconvolve_youngCausalPdS_iidS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z18transposeCoalescedPdPKdii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" 
.section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda_young.hip" .globl _Z33__device_stub__transposeCoalescedPdPKdii # -- Begin function _Z33__device_stub__transposeCoalescedPdPKdii .p2align 4, 0x90 .type _Z33__device_stub__transposeCoalescedPdPKdii,@function _Z33__device_stub__transposeCoalescedPdPKdii: # @_Z33__device_stub__transposeCoalescedPdPKdii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z33__device_stub__transposeCoalescedPdPKdii, .Lfunc_end0-_Z33__device_stub__transposeCoalescedPdPKdii .cfi_endproc # -- End function .globl _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ # -- Begin function _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .p2align 4, 0x90 .type _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_,@function _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_: # @_Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movsd %xmm0, 72(%rsp) movq %r8, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 64(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), 
%rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end1: .size _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_, .Lfunc_end1-_Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .cfi_endproc # -- End function .globl _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ # -- Begin function _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .p2align 4, 0x90 .type _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_,@function _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_: # @_Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movsd %xmm0, 72(%rsp) movq %r8, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 64(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end2: .size _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_, .Lfunc_end2-_Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .cfi_endproc # -- End function .globl cudaYoung # -- Begin function cudaYoung .p2align 4, 0x90 .type cudaYoung,@function cudaYoung: # @cudaYoung .cfi_startproc # %bb.0: pushq 
%rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movsd %xmm0, 168(%rsp) # 8-byte Spill movq %r9, %r13 movq %r8, %rbx movl %ecx, %r15d movl %edx, %r12d movq %rdi, %rbp leaq 200(%rsp), %rdi callq hipEventCreate leaq 192(%rsp), %rdi callq hipEventCreate movq 200(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movl %r15d, %eax imull %r12d, %eax movslq %eax, %r14 shlq $3, %r14 leaq 96(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 104(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 184(%rsp), %rdi movq %r14, %rsi callq hipMalloc leaq 176(%rsp), %rdi movq %r14, %rsi callq hipMalloc movq 96(%rsp), %rdi movq %rbp, 208(%rsp) # 8-byte Spill movq %rbp, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy movq 184(%rsp), %rdi movl $24, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 176(%rsp), %rdi movl $24, %edx movq %r13, %rsi movl $1, %ecx callq hipMemcpy leal 31(%r15), %eax leal 62(%r15), %ecx testl %eax, %eax cmovnsl %eax, %ecx sarl $5, %ecx leal 31(%r12), %eax leal 62(%r12), %r13d testl %eax, %eax cmovnsl %eax, %r13d sarl $5, %r13d shlq $32, %r13 orq %rcx, %r13 movq %r13, %rdi movl $1, %esi movabsq $34359738400, %rdx # imm = 0x800000020 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 24(%rsp) movl %r15d, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration 
movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: leal 255(%r12), %eax testl %r12d, %r12d cmovnsl %r12d, %eax movabsq $4294967552, %rbp # imm = 0x100000100 sarl $8, %eax incl %eax leaq (%rax,%rbp), %rbx addq $-256, %rbx movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq 184(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 12(%rsp) movl %r12d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: rolq $32, %r13 movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_6 # %bb.5: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq 176(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 12(%rsp) movl %r12d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 
80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_6: movq %r13, %rdi movl $1, %esi movabsq $34359738400, %rdx # imm = 0x800000020 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_8 # %bb.7: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r15d, 24(%rsp) movl %r12d, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18transposeCoalescedPdPKdii, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_8: leal 255(%r15), %eax testl %r15d, %r15d cmovnsl %r15d, %eax sarl $8, %eax incl %eax leaq (%rax,%rbp), %rbx addq $-256, %rbx movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_10 # %bb.9: movq 96(%rsp), %rax movq 104(%rsp), %rcx movq 184(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 12(%rsp) movl %r15d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero 
movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z22cuconvolve_youngCausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_10: movq %rbx, %rdi movl $1, %esi movq %rbp, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_12 # %bb.11: movq 104(%rsp), %rax movq 96(%rsp), %rcx movq 176(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movl %r12d, 12(%rsp) movl %r15d, 8(%rsp) movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd %xmm0, 72(%rsp) movq %rdx, 64(%rsp) leaq 88(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_12: movq 96(%rsp), %rsi movq 208(%rsp), %rdi # 8-byte Reload movq %r14, %rdx movl $2, %ecx callq hipMemcpy movq 192(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 192(%rsp), %rdi callq hipEventSynchronize movl $0, 
112(%rsp) movq 200(%rsp), %rsi movq 192(%rsp), %rdx leaq 112(%rsp), %rdi callq hipEventElapsedTime movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 96(%rsp), %rdi callq hipFree movq 104(%rsp), %rdi callq hipFree movq 184(%rsp), %rdi callq hipFree movq 176(%rsp), %rdi callq hipFree addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size cudaYoung, .Lfunc_end3-cudaYoung .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18transposeCoalescedPdPKdii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22cuconvolve_youngCausalPdS_iidS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26cuconvolve_youngAnticausalPdS_iidS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size 
__hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z18transposeCoalescedPdPKdii,@object # @_Z18transposeCoalescedPdPKdii .section .rodata,"a",@progbits .globl _Z18transposeCoalescedPdPKdii .p2align 3, 0x0 _Z18transposeCoalescedPdPKdii: .quad _Z33__device_stub__transposeCoalescedPdPKdii .size _Z18transposeCoalescedPdPKdii, 8 .type _Z22cuconvolve_youngCausalPdS_iidS_,@object # @_Z22cuconvolve_youngCausalPdS_iidS_ .globl _Z22cuconvolve_youngCausalPdS_iidS_ .p2align 3, 0x0 _Z22cuconvolve_youngCausalPdS_iidS_: .quad _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .size _Z22cuconvolve_youngCausalPdS_iidS_, 8 .type _Z26cuconvolve_youngAnticausalPdS_iidS_,@object # @_Z26cuconvolve_youngAnticausalPdS_iidS_ .globl _Z26cuconvolve_youngAnticausalPdS_iidS_ .p2align 3, 0x0 _Z26cuconvolve_youngAnticausalPdS_iidS_: .quad _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .size _Z26cuconvolve_youngAnticausalPdS_iidS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Execution time elapsed: %f ms\n" .size .L.str, 31 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18transposeCoalescedPdPKdii" .size .L__unnamed_1, 30 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z22cuconvolve_youngCausalPdS_iidS_" .size .L__unnamed_2, 36 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z26cuconvolve_youngAnticausalPdS_iidS_" .size .L__unnamed_3, 40 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section 
.hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__transposeCoalescedPdPKdii .addrsig_sym _Z37__device_stub__cuconvolve_youngCausalPdS_iidS_ .addrsig_sym _Z41__device_stub__cuconvolve_youngAnticausalPdS_iidS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18transposeCoalescedPdPKdii .addrsig_sym _Z22cuconvolve_youngCausalPdS_iidS_ .addrsig_sym _Z26cuconvolve_youngAnticausalPdS_iidS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cstdlib> #include <cstdio> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. cudaError_t stat = cudaMalloc(ptr, size); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ cudaMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = cudaMemcpyHostToHost; break; case 1: dir = cudaMemcpyHostToDevice; break; case 2: dir = cudaMemcpyDeviceToHost; break; case 3: dir = cudaMemcpyDeviceToDevice; break; default: dir = cudaMemcpyHostToHost; break; } cudaError_t stat = cudaMemcpy(dst, src, size, dir); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ cudaFree(ptr);}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cstdlib> #include <cstdio> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. cudaError_t stat = cudaMalloc(ptr, size); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ cudaMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = cudaMemcpyHostToHost; break; case 1: dir = cudaMemcpyHostToDevice; break; case 2: dir = cudaMemcpyDeviceToHost; break; case 3: dir = cudaMemcpyDeviceToDevice; break; default: dir = cudaMemcpyHostToHost; break; } cudaError_t stat = cudaMemcpy(dst, src, size, dir); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ cudaFree(ptr);}
.file "tmpxft_00046ae3_00000000-6_cuda_handler.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3674: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3674: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "allocation stat: " .text .globl _Z8allocatePPvm .type _Z8allocatePPvm, @function _Z8allocatePPvm: .LFB3669: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 call cudaMalloc@PLT movl %eax, %ebx movl $1, %eax testl %ebx, %ebx jne .L10 .L3: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L10: .cfi_restore_state movl $17, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebx, %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L11 cmpb $0, 56(%rbp) je .L6 movzbl 67(%rbp), %esi .L7: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, %eax jmp .L3 .L11: call _ZSt16__throw_bad_castv@PLT .L6: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L7 .cfi_endproc .LFE3669: .size _Z8allocatePPvm, .-_Z8allocatePPvm .section .rodata.str1.1 .LC1: .string "copy stat: " .text .globl _Z4copyPvS_mi .type _Z4copyPvS_mi, @function _Z4copyPvS_mi: .LFB3670: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 
6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl $2, %eax cmpl $2, %ecx je .L13 movl $3, %eax cmpl $3, %ecx je .L13 cmpl $1, %ecx sete %al movzbl %al, %eax .L13: movl %eax, %ecx call cudaMemcpy@PLT movl %eax, %ebx movl $1, %eax testl %ebx, %ebx jne .L22 .L12: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L22: .cfi_restore_state movl $11, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebx, %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L23 cmpb $0, 56(%rbp) je .L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, %eax jmp .L12 .L23: call _ZSt16__throw_bad_castv@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .cfi_endproc .LFE3670: .size _Z4copyPvS_mi, .-_Z4copyPvS_mi .globl _Z7releasePv .type _Z7releasePv, @function _Z7releasePv: .LFB3671: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call cudaFree@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3671: .size _Z7releasePv, .-_Z7releasePv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section 
.init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cstdlib> #include <cstdio> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. cudaError_t stat = cudaMalloc(ptr, size); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ cudaMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = cudaMemcpyHostToHost; break; case 1: dir = cudaMemcpyHostToDevice; break; case 2: dir = cudaMemcpyDeviceToHost; break; case 3: dir = cudaMemcpyDeviceToDevice; break; default: dir = cudaMemcpyHostToHost; break; } cudaError_t stat = cudaMemcpy(dst, src, size, dir); if (stat == cudaSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ cudaFree(ptr);}
#include <cstdlib> #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. hipError_t stat = hipMalloc(ptr, size); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ hipMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = hipMemcpyHostToHost; break; case 1: dir = hipMemcpyHostToDevice; break; case 2: dir = hipMemcpyDeviceToHost; break; case 3: dir = hipMemcpyDeviceToDevice; break; default: dir = hipMemcpyHostToHost; break; } hipError_t stat = hipMemcpy(dst, src, size, dir); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ hipFree(ptr);}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <cstdlib> #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. hipError_t stat = hipMalloc(ptr, size); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ hipMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = hipMemcpyHostToHost; break; case 1: dir = hipMemcpyHostToDevice; break; case 2: dir = hipMemcpyDeviceToHost; break; case 3: dir = hipMemcpyDeviceToDevice; break; default: dir = hipMemcpyHostToHost; break; } hipError_t stat = hipMemcpy(dst, src, size, dir); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ hipFree(ptr);}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <cstdlib> #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> bool allocate (void **ptr, size_t size){ // since cudaMalloc accepts double pointer, casting is required. hipError_t stat = hipMalloc(ptr, size); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "allocation stat: " << stat << std::endl; return false; } /* enum cudaMemcpyKind: cudaMemcpyHostToHost = 0 cudaMemcpyHostToDevice = 1 cudaMemcpyDeviceToHost = 2 cudaMemcpyDeviceToDevice = 3 cudaMemcpyDefault = 4 */ bool copy(void *dst, void *src, size_t size, int cudaCpyKind){ hipMemcpyKind dir; switch (cudaCpyKind) { case 0: dir = hipMemcpyHostToHost; break; case 1: dir = hipMemcpyHostToDevice; break; case 2: dir = hipMemcpyDeviceToHost; break; case 3: dir = hipMemcpyDeviceToDevice; break; default: dir = hipMemcpyHostToHost; break; } hipError_t stat = hipMemcpy(dst, src, size, dir); if (stat == hipSuccess) return true; // if no success, print the error std::cout << "copy stat: " << stat << std::endl; return false; } void release(void *ptr){ hipFree(ptr);}
.text .file "cuda_handler.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z8allocatePPvm # -- Begin function _Z8allocatePPvm .p2align 4, 0x90 .type _Z8allocatePPvm,@function _Z8allocatePPvm: # @_Z8allocatePPvm .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 callq hipMalloc movl %eax, %ebx testl %eax, %eax je .LBB0_6 # %bb.1: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $17, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebx, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB0_7 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r14) je .LBB0_4 # %bb.3: movzbl 67(%r14), %ecx jmp .LBB0_5 .LBB0_4: movq %r14, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB0_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB0_6: testl %ebx, %ebx sete %al popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_7: .cfi_def_cfa_offset 32 callq _ZSt16__throw_bad_castv .Lfunc_end0: .size _Z8allocatePPvm, .Lfunc_end0-_Z8allocatePPvm .cfi_endproc # -- End function .globl _Z4copyPvS_mi # -- Begin function _Z4copyPvS_mi .p2align 4, 0x90 .type _Z4copyPvS_mi,@function _Z4copyPvS_mi: # @_Z4copyPvS_mi .cfi_startproc # %bb.0: # %switch.lookup pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 # kill: def $ecx killed $ecx def $rcx leal -1(%rcx), 
%eax xorl %r8d, %r8d cmpl $3, %eax cmovael %r8d, %ecx # kill: def $ecx killed $ecx killed $rcx callq hipMemcpy movl %eax, %ebx testl %eax, %eax je .LBB1_6 # %bb.1: movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebx, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB1_7 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r14) je .LBB1_4 # %bb.3: movzbl 67(%r14), %ecx jmp .LBB1_5 .LBB1_4: movq %r14, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB1_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB1_6: testl %ebx, %ebx sete %al popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB1_7: .cfi_def_cfa_offset 32 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size _Z4copyPvS_mi, .Lfunc_end1-_Z4copyPvS_mi .cfi_endproc # -- End function .globl _Z7releasePv # -- Begin function _Z7releasePv .p2align 4, 0x90 .type _Z7releasePv,@function _Z7releasePv: # @_Z7releasePv .cfi_startproc # %bb.0: jmp hipFree # TAILCALL .Lfunc_end2: .size _Z7releasePv, .Lfunc_end2-_Z7releasePv .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "allocation stat: " .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "copy stat: " .size .L.str.1, 12 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" 
.section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00046ae3_00000000-6_cuda_handler.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3674: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3674: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "allocation stat: " .text .globl _Z8allocatePPvm .type _Z8allocatePPvm, @function _Z8allocatePPvm: .LFB3669: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 call cudaMalloc@PLT movl %eax, %ebx movl $1, %eax testl %ebx, %ebx jne .L10 .L3: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L10: .cfi_restore_state movl $17, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebx, %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L11 cmpb $0, 56(%rbp) je .L6 movzbl 67(%rbp), %esi .L7: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, %eax jmp .L3 .L11: call _ZSt16__throw_bad_castv@PLT .L6: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L7 .cfi_endproc .LFE3669: .size _Z8allocatePPvm, .-_Z8allocatePPvm .section .rodata.str1.1 .LC1: .string "copy stat: " .text .globl _Z4copyPvS_mi .type _Z4copyPvS_mi, @function _Z4copyPvS_mi: .LFB3670: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 
6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl $2, %eax cmpl $2, %ecx je .L13 movl $3, %eax cmpl $3, %ecx je .L13 cmpl $1, %ecx sete %al movzbl %al, %eax .L13: movl %eax, %ecx call cudaMemcpy@PLT movl %eax, %ebx movl $1, %eax testl %ebx, %ebx jne .L22 .L12: addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L22: .cfi_restore_state movl $11, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebx, %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L23 cmpb $0, 56(%rbp) je .L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, %eax jmp .L12 .L23: call _ZSt16__throw_bad_castv@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .cfi_endproc .LFE3670: .size _Z4copyPvS_mi, .-_Z4copyPvS_mi .globl _Z7releasePv .type _Z7releasePv, @function _Z7releasePv: .LFB3671: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call cudaFree@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3671: .size _Z7releasePv, .-_Z7releasePv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section 
.init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda_handler.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z8allocatePPvm # -- Begin function _Z8allocatePPvm .p2align 4, 0x90 .type _Z8allocatePPvm,@function _Z8allocatePPvm: # @_Z8allocatePPvm .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 callq hipMalloc movl %eax, %ebx testl %eax, %eax je .LBB0_6 # %bb.1: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $17, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebx, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB0_7 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r14) je .LBB0_4 # %bb.3: movzbl 67(%r14), %ecx jmp .LBB0_5 .LBB0_4: movq %r14, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB0_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB0_6: testl %ebx, %ebx sete %al popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_7: .cfi_def_cfa_offset 32 callq _ZSt16__throw_bad_castv .Lfunc_end0: .size _Z8allocatePPvm, .Lfunc_end0-_Z8allocatePPvm .cfi_endproc # -- End function .globl _Z4copyPvS_mi # -- Begin function _Z4copyPvS_mi .p2align 4, 0x90 .type _Z4copyPvS_mi,@function _Z4copyPvS_mi: # @_Z4copyPvS_mi .cfi_startproc # %bb.0: # %switch.lookup pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 # kill: def $ecx killed $ecx def $rcx leal -1(%rcx), 
%eax xorl %r8d, %r8d cmpl $3, %eax cmovael %r8d, %ecx # kill: def $ecx killed $ecx killed $rcx callq hipMemcpy movl %eax, %ebx testl %eax, %eax je .LBB1_6 # %bb.1: movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebx, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB1_7 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r14) je .LBB1_4 # %bb.3: movzbl 67(%r14), %ecx jmp .LBB1_5 .LBB1_4: movq %r14, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB1_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB1_6: testl %ebx, %ebx sete %al popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB1_7: .cfi_def_cfa_offset 32 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size _Z4copyPvS_mi, .Lfunc_end1-_Z4copyPvS_mi .cfi_endproc # -- End function .globl _Z7releasePv # -- Begin function _Z7releasePv .p2align 4, 0x90 .type _Z7releasePv,@function _Z7releasePv: # @_Z7releasePv .cfi_startproc # %bb.0: jmp hipFree # TAILCALL .Lfunc_end2: .size _Z7releasePv, .Lfunc_end2-_Z7releasePv .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "allocation stat: " .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "copy stat: " .size .L.str.1, 12 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" 
.section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <cuda.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; cudaError_t cuda_status = cudaMemGetInfo(&avail, &total); if ( cudaSuccess != cuda_status ) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (cudaMalloc((void**)&buf_d, nwords * sizeof(int)) == cudaErrorMemoryAllocation) { cudaFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } cudaFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)cudaGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 cudaMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 cudaMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 cudaMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free 
total=3.5 cudaMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 cudaMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 cudaFree(d1); printf("err4: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 cudaFree(d3); printf("err4: %d\n", (int)cudaGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory cudaMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good cudaMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <cuda.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; cudaError_t cuda_status = cudaMemGetInfo(&avail, &total); if ( cudaSuccess != cuda_status ) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (cudaMalloc((void**)&buf_d, nwords * sizeof(int)) == cudaErrorMemoryAllocation) { cudaFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } cudaFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)cudaGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 cudaMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 cudaMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 cudaMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free 
total=3.5 cudaMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 cudaMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 cudaFree(d1); printf("err4: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 cudaFree(d3); printf("err4: %d\n", (int)cudaGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory cudaMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good cudaMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
.file "tmpxft_001671d3_00000000-6_fragment_gpu_mem.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2074: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2074: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Error: cudaMemGetInfo fails, %s \n" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "free: %.f, total %.f\n" .LC3: .string "err2: %d\n" .LC5: .string "can allocate: %.fMB\n" .text .globl _Z12can_allocatev .type _Z12can_allocatev, @function _Z12can_allocatev: .LFB2070: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $40, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rsi leaq 8(%rsp), %rdi call cudaMemGetInfo@PLT testl %eax, %eax jne .L16 movq (%rsp), %rax testq %rax, %rax js .L5 pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 .L6: mulsd .LC1(%rip), %xmm1 movq 8(%rsp), %rax testq %rax, %rax js .L7 pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 .L8: mulsd .LC1(%rip), %xmm0 leaq .LC2(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq $0, 16(%rsp) movq (%rsp), %rbx shrq $2, %rbx leaq 16(%rsp), %rbp .L9: leaq 0(,%rbx,4), %rsi movq %rbp, %rdi call cudaMalloc@PLT cmpl $2, %eax jne .L10 movq 16(%rsp), %rdi call cudaFree@PLT subq $262144, %rbx cmpq $262143, %rbx ja .L9 .L10: movq 16(%rsp), %rdi call cudaFree@PLT call cudaGetLastError@PLT movl %eax, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testq %rbx, %rbx js .L12 pxor %xmm0, %xmm0 cvtsi2sdq %rbx, %xmm0 .L13: mulsd .LC4(%rip), %xmm0 leaq .LC5(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 24(%rsp), %rax subq %fs:40, %rax 
jne .L17 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L16: .cfi_restore_state movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L5: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm1, %xmm1 cvtsi2sdq %rdx, %xmm1 addsd %xmm1, %xmm1 jmp .L6 .L7: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 addsd %xmm0, %xmm0 jmp .L8 .L12: movq %rbx, %rax shrq %rax andl $1, %ebx orq %rbx, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 addsd %xmm0, %xmm0 jmp .L13 .L17: call __stack_chk_fail@PLT .cfi_endproc .LFE2070: .size _Z12can_allocatev, .-_Z12can_allocatev .section .rodata.str1.1 .LC6: .string "err1: %d\n" .LC7: .string "err3: %d\n" .LC8: .string "err4: %d\n" .text .globl main .type main, @function main: .LFB2071: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movq %rsp, %rdi movl $410000000, %esi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx leaq .LC6(%rip), %rbx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rbp movl $524000000, %esi movq %rbp, %rdi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 16(%rsp), %rdi movl $3144000000, %r12d movq %r12, %rsi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx leaq .LC3(%rip), %rbx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 24(%rsp), %rdi movl $524000000, %esi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT 
leaq 32(%rsp), %rdi movq %r12, %rsi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT call cudaGetLastError@PLT movl %eax, %edx leaq .LC8(%rip), %r12 movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 24(%rsp), %rdi call cudaFree@PLT call cudaGetLastError@PLT movl %eax, %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call _Z12can_allocatev movl $1048000000, %esi movq %rbp, %rdi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2096000000, %esi movq %rbp, %rdi call cudaMalloc@PLT call cudaGetLastError@PLT movl %eax, %edx movq %rbx, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1000, %edi call sleep@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L21 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2071: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2097: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2097: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1051721728 .align 8 .LC4: .long 0 .long 1053818880 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <cuda.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; cudaError_t cuda_status = cudaMemGetInfo(&avail, &total); if ( cudaSuccess != cuda_status ) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (cudaMalloc((void**)&buf_d, nwords * sizeof(int)) == cudaErrorMemoryAllocation) { cudaFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } cudaFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)cudaGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 cudaMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 cudaMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 cudaMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free 
total=3.5 cudaMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)cudaGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 cudaMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 cudaFree(d1); printf("err4: %d\n", (int)cudaGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 cudaFree(d3); printf("err4: %d\n", (int)cudaGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory cudaMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good cudaMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)cudaGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <hip/hip_runtime.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; hipError_t cuda_status = hipMemGetInfo(&avail, &total); if ( hipSuccess != cuda_status ) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (hipMalloc((void**)&buf_d, nwords * sizeof(int)) == hipErrorOutOfMemory) { hipFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } hipFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)hipGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 hipMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 hipMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 hipMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free total=3.5 
hipMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 hipMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 hipFree(d1); printf("err4: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 hipFree(d3); printf("err4: %d\n", (int)hipGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory hipMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good hipMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <hip/hip_runtime.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; hipError_t cuda_status = hipMemGetInfo(&avail, &total); if ( hipSuccess != cuda_status ) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (hipMalloc((void**)&buf_d, nwords * sizeof(int)) == hipErrorOutOfMemory) { hipFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } hipFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)hipGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 hipMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 hipMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 hipMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free total=3.5 
hipMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 hipMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 hipFree(d1); printf("err4: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 hipFree(d3); printf("err4: %d\n", (int)hipGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory hipMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good hipMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* fragment gpu RAM by allocating a bunch of blocks and then releasing some in between, creating holes then try to allocate more than the size of the largest hole, but less than total free memory it appears that CUDA succeeds conclusiong: cudaMalloc it's not allocating contiguous memory */ #include <stdio.h> #include <unistd.h> #include <hip/hip_runtime.h> const size_t Mb = 1<<20; // Assuming a 1Mb page size here #define DSIZE0 410000000ULL // ~400MB #define DSIZE1 3144000000ULL // ~3000MB #define DSIZE2 524000000ULL // ~500MB #define DSIZE3 630000000ULL // ~600MB void can_allocate() { size_t total; size_t avail; hipError_t cuda_status = hipMemGetInfo(&avail, &total); if ( hipSuccess != cuda_status ) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(EXIT_FAILURE); } printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb); int *buf_d = 0; size_t nwords = total / sizeof(int); size_t words_per_Mb = Mb / sizeof(int); /* the only way to measure how much memory is allocatable is by trial and error, cudaMemGetInfo's available memory information is not reliable */ while (hipMalloc((void**)&buf_d, nwords * sizeof(int)) == hipErrorOutOfMemory) { hipFree(buf_d); nwords -= words_per_Mb; if (nwords < words_per_Mb) { // signal no free memory break; } } hipFree(buf_d); /* clear last error */ printf("err2: %d\n", (int)hipGetLastError()); printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb); } int main() { int *d0, *d1, *d2, *d3, *d4; //cudaSetDevice(0); /* starting with 8GB free */ /* legend: [allocated]{free} */ // init - prealloc 500MB, including ~100MB CUDA ctx // [0.5]{7.5} - free total=7.5 hipMalloc(&d0, DSIZE0); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5]{7.0} - free total=7.0 hipMalloc(&d1, DSIZE2); printf("err1: %d\n", (int)hipGetLastError()); // [0.5][0.5][3]{4.0} - free total=4.0 hipMalloc(&d2, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5]{3.5} - free total=3.5 
hipMalloc(&d3, DSIZE2); printf("err3: %d\n", (int)hipGetLastError()); // [0.5][0.5][3][0.5][3]{0.5} - free total=0.5 hipMalloc(&d4, DSIZE1); printf("err2: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3][0.5][3]{0.5} - free total=1.0 hipFree(d1); printf("err4: %d\n", (int)hipGetLastError()); // [0.5]{0.5}[3]{0.5}[3]{0.5} - free total=1.5 hipFree(d3); printf("err4: %d\n", (int)hipGetLastError()); // here we should have 1.5GB free in total, with 3 fragments of 0.5GB // this should say 0.5GB, but it says 1.6GB - so it allocates over fragments can_allocate(); // another way to check is we shouldn't be able to allocate say 1GB of contiguous memory hipMalloc(&d1, 2*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); // sanity check 2GB at 1.5G free should fail // this fails, good hipMalloc(&d1, 4*DSIZE2); printf("err2: %d\n", (int)hipGetLastError()); sleep(1000); /* keep consuming RAM */ return 0; }
.text .file "fragment_gpu_mem.hip" .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function _Z12can_allocatev .LCPI0_0: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI0_1: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI0_2: .quad 0x3eb0000000000000 # double 9.5367431640625E-7 .LCPI0_3: .quad 0x3ed0000000000000 # double 3.814697265625E-6 .text .globl _Z12can_allocatev .p2align 4, 0x90 .type _Z12can_allocatev,@function _Z12can_allocatev: # @_Z12can_allocatev .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $32, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 leaq 24(%rsp), %rdi leaq 16(%rsp), %rsi callq hipMemGetInfo testl %eax, %eax jne .LBB0_5 # %bb.1: movsd 24(%rsp), %xmm1 # xmm1 = mem[0],zero movapd .LCPI0_0(%rip), %xmm2 # xmm2 = [1127219200,1160773632,0,0] unpcklps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] movapd .LCPI0_1(%rip), %xmm3 # xmm3 = [4.503599627370496E+15,1.9342813113834067E+25] subpd %xmm3, %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 movsd .LCPI0_2(%rip), %xmm4 # xmm4 = mem[0],zero mulsd %xmm4, %xmm0 movsd 16(%rsp), %xmm5 # xmm5 = mem[0],zero unpcklps %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] subpd %xmm3, %xmm5 movapd %xmm5, %xmm1 unpckhpd %xmm5, %xmm1 # xmm1 = xmm1[1],xmm5[1] addsd %xmm5, %xmm1 mulsd %xmm4, %xmm1 movl $.L.str.1, %edi movb $2, %al callq printf movq $0, 8(%rsp) movq 16(%rsp), %rbx movq %rbx, %r15 shrq $2, %r15 andq $-4, %rbx leaq 8(%rsp), %r14 .p2align 4, 0x90 .LBB0_2: # =>This Inner Loop Header: Depth=1 movq %r14, %rdi movq %rbx, %rsi callq hipMalloc cmpl $2, %eax jne .LBB0_4 # %bb.3: # in Loop: Header=BB0_2 Depth=1 movq 8(%rsp), %rdi callq hipFree addq 
$-262144, %r15 # imm = 0xFFFC0000 addq $-1048576, %rbx # imm = 0xFFF00000 cmpq $262143, %r15 # imm = 0x3FFFF ja .LBB0_2 .LBB0_4: movq 8(%rsp), %rdi callq hipFree callq hipGetLastError movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movq %r15, %xmm1 punpckldq .LCPI0_0(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] subpd .LCPI0_1(%rip), %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 mulsd .LCPI0_3(%rip), %xmm0 movl $.L.str.3, %edi movb $1, %al callq printf addq $32, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_5: .cfi_def_cfa_offset 64 movl %eax, %edi callq hipGetErrorString movl $.L.str, %edi movq %rax, %rsi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end0: .size _Z12can_allocatev, .Lfunc_end0-_Z12can_allocatev .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $48, %rsp .cfi_def_cfa_offset 64 .cfi_offset %rbx, -16 leaq 40(%rsp), %rdi movl $410000000, %esi # imm = 0x18701A80 callq hipMalloc callq hipGetLastError movl $.L.str.4, %edi movl %eax, %esi xorl %eax, %eax callq printf leaq 16(%rsp), %rbx movl $524000000, %esi # imm = 0x1F3B9B00 movq %rbx, %rdi callq hipMalloc callq hipGetLastError movl $.L.str.4, %edi movl %eax, %esi xorl %eax, %eax callq printf leaq 32(%rsp), %rdi movl $3144000000, %esi # imm = 0xBB65A200 callq hipMalloc callq hipGetLastError movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf leaq 8(%rsp), %rdi movl $524000000, %esi # imm = 0x1F3B9B00 callq hipMalloc callq hipGetLastError movl $.L.str.5, %edi movl %eax, %esi xorl %eax, %eax callq printf leaq 24(%rsp), %rdi movl $3144000000, %esi # imm = 0xBB65A200 callq hipMalloc callq hipGetLastError movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movq 16(%rsp), %rdi 
callq hipFree callq hipGetLastError movl $.L.str.6, %edi movl %eax, %esi xorl %eax, %eax callq printf movq 8(%rsp), %rdi callq hipFree callq hipGetLastError movl $.L.str.6, %edi movl %eax, %esi xorl %eax, %eax callq printf callq _Z12can_allocatev movl $1048000000, %esi # imm = 0x3E773600 movq %rbx, %rdi callq hipMalloc callq hipGetLastError movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movl $2096000000, %esi # imm = 0x7CEE6C00 movq %rbx, %rdi callq hipMalloc callq hipGetLastError movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movl $1000, %edi # imm = 0x3E8 callq sleep xorl %eax, %eax addq $48, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error: hipMemGetInfo fails, %s \n" .size .L.str, 33 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "free: %.f, total %.f\n" .size .L.str.1, 22 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "err2: %d\n" .size .L.str.2, 11 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "can allocate: %.fMB\n" .size .L.str.3, 22 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "err1: %d\n" .size .L.str.4, 11 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "err3: %d\n" .size .L.str.5, 11 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "err4: %d\n" .size .L.str.6, 11 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_