system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* * purpose: just a demo to show how vector addition can be done on * the GPU with just a single thread block * compilation: nvcc ./single_thread_block_vector_addition.cu * usage: ./a.out */ #include <hip/hip_runtime.h> #include <stdio.h> #define N 100 /* * GPU kernel */ __global__ void VecAdd(float *A, float *B, float *C) { int i; i = threadIdx.x; C[i] = A[i] + B[i]; } /* * host main */ int main() { int i; dim3 numBlocks, threadsPerBlock; float *A, *B, *C; /* * using CUDA unified memory, first allocate * the memory then initialize with some dummy content */ hipMallocManaged(&A, N * sizeof(float)); hipMallocManaged(&B, N * sizeof(float)); hipMallocManaged(&C, N * sizeof(float)); for (i = 0; i < N; i++) { A[i] = (float) i; B[i] = (float) (N - i); C[i] = (float) 0; } /* * set up GPU kernel execution configuration * however, this time we send in explicit parameters * directly */ threadsPerBlock.x = N; numBlocks.x = 1; /* launch the GPU kernel */ VecAdd<<<1, N>>>(A, B, C); hipDeviceSynchronize(); /* print result */ for (i = 0; i < N; i++) { printf("%d %f\n", i, C[i]); } /* make clean */ hipFree(C); hipFree(B); hipFree(A); return(0); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6VecAddPfS_S_ .globl _Z6VecAddPfS_S_ .p2align 8 .type _Z6VecAddPfS_S_,@function _Z6VecAddPfS_S_: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b32 v1, v0, s[4:5] global_load_b32 v2, v0, s[6:7] s_waitcnt vmcnt(0) v_add_f32_e32 v1, v1, v2 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6VecAddPfS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6VecAddPfS_S_, .Lfunc_end0-_Z6VecAddPfS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl 
__hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6VecAddPfS_S_ .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z6VecAddPfS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * purpose: just a demo to show how vector addition can be done on * the GPU with just a single thread block * compilation: nvcc ./single_thread_block_vector_addition.cu * usage: ./a.out */ #include <hip/hip_runtime.h> #include <stdio.h> #define N 100 /* * GPU kernel */ __global__ void VecAdd(float *A, float *B, float *C) { int i; i = threadIdx.x; C[i] = A[i] + B[i]; } /* * host main */ int main() { int i; dim3 numBlocks, threadsPerBlock; float *A, *B, *C; /* * using CUDA unified memory, first allocate * the memory then initialize with some dummy content */ hipMallocManaged(&A, N * sizeof(float)); hipMallocManaged(&B, N * sizeof(float)); hipMallocManaged(&C, N * sizeof(float)); for (i = 0; i < N; i++) { A[i] = (float) i; B[i] = (float) (N - i); C[i] = (float) 0; } /* * set up GPU kernel execution configuration * however, this time we send in explicit parameters * directly */ threadsPerBlock.x = N; numBlocks.x = 1; /* launch the GPU kernel */ VecAdd<<<1, N>>>(A, B, C); hipDeviceSynchronize(); /* print result */ for (i = 0; i < N; i++) { printf("%d %f\n", i, C[i]); } /* make clean */ hipFree(C); hipFree(B); hipFree(A); return(0); }
.text .file "single_thread_block_vector_addition.hip" .globl _Z21__device_stub__VecAddPfS_S_ # -- Begin function _Z21__device_stub__VecAddPfS_S_ .p2align 4, 0x90 .type _Z21__device_stub__VecAddPfS_S_,@function _Z21__device_stub__VecAddPfS_S_: # @_Z21__device_stub__VecAddPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6VecAddPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__VecAddPfS_S_, .Lfunc_end0-_Z21__device_stub__VecAddPfS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $128, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -16 leaq 16(%rsp), %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged leaq 8(%rsp), %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged movq %rsp, %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged movq 16(%rsp), %rax movq 8(%rsp), %rcx movl $100, %edx xorl %esi, %esi movq (%rsp), %rdi .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %esi, %xmm0 movss %xmm0, (%rax,%rsi,4) xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 movss %xmm0, (%rcx,%rsi,4) movl $0, (%rdi,%rsi,4) incq %rsi decq %rdx jne .LBB1_1 # %bb.2: movabsq $4294967297, %rdi # imm = 0x100000001 leaq 99(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z6VecAddPfS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: callq hipDeviceSynchronize xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movq (%rsp), %rax movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl %ebx, %esi movb $1, %al callq printf incq %rbx cmpq $100, %rbx jne .LBB1_5 # %bb.6: movq (%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree xorl %eax, %eax addq $128, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6VecAddPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, 
.Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6VecAddPfS_S_,@object # @_Z6VecAddPfS_S_ .section .rodata,"a",@progbits .globl _Z6VecAddPfS_S_ .p2align 3, 0x0 _Z6VecAddPfS_S_: .quad _Z21__device_stub__VecAddPfS_S_ .size _Z6VecAddPfS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d %f\n" .size .L.str, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6VecAddPfS_S_" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__VecAddPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6VecAddPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6VecAddPfS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x001fc800078e0207 */ /*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0207 */ /*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */ /* 0x004fca0000000000 */ /*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6VecAddPfS_S_ .globl _Z6VecAddPfS_S_ .p2align 8 .type _Z6VecAddPfS_S_,@function _Z6VecAddPfS_S_: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b32 v1, v0, s[4:5] global_load_b32 v2, v0, s[6:7] s_waitcnt vmcnt(0) v_add_f32_e32 v1, v1, v2 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6VecAddPfS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6VecAddPfS_S_, .Lfunc_end0-_Z6VecAddPfS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl 
__hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6VecAddPfS_S_ .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z6VecAddPfS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001087e7_00000000-6_single_thread_block_vector_addition.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z6VecAddPfS_S_PfS_S_ .type _Z29__device_stub__Z6VecAddPfS_S_PfS_S_, @function _Z29__device_stub__Z6VecAddPfS_S_PfS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6VecAddPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z6VecAddPfS_S_PfS_S_, .-_Z29__device_stub__Z6VecAddPfS_S_PfS_S_ .globl _Z6VecAddPfS_S_ .type _Z6VecAddPfS_S_, @function _Z6VecAddPfS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z6VecAddPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size 
_Z6VecAddPfS_S_, .-_Z6VecAddPfS_S_ .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "%d %f\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $72, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $1, %edx movl $400, %esi call cudaMallocManaged@PLT leaq 16(%rsp), %rdi movl $1, %edx movl $400, %esi call cudaMallocManaged@PLT leaq 24(%rsp), %rdi movl $1, %edx movl $400, %esi call cudaMallocManaged@PLT movl $0, %eax movl $100, %ecx .L12: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movq 8(%rsp), %rdx movss %xmm0, (%rdx,%rax,4) movl %ecx, %edx subl %eax, %edx pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 movq 16(%rsp), %rdx movss %xmm0, (%rdx,%rax,4) movq 24(%rsp), %rdx movl $0x00000000, (%rdx,%rax,4) addq $1, %rax cmpq $100, %rax jne .L12 movl $100, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L19 .L13: call cudaDeviceSynchronize@PLT movl $0, %ebx leaq .LC1(%rip), %rbp .L14: movq 24(%rsp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx,4), %xmm0 movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $1, %rbx cmpq $100, %rbx jne .L14 movq 24(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z29__device_stub__Z6VecAddPfS_S_PfS_S_ jmp .L13 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section 
.rodata.str1.1 .LC2: .string "_Z6VecAddPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z6VecAddPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "single_thread_block_vector_addition.hip" .globl _Z21__device_stub__VecAddPfS_S_ # -- Begin function _Z21__device_stub__VecAddPfS_S_ .p2align 4, 0x90 .type _Z21__device_stub__VecAddPfS_S_,@function _Z21__device_stub__VecAddPfS_S_: # @_Z21__device_stub__VecAddPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6VecAddPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__VecAddPfS_S_, .Lfunc_end0-_Z21__device_stub__VecAddPfS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $128, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -16 leaq 16(%rsp), %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged leaq 8(%rsp), %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged movq %rsp, %rdi movl $400, %esi # imm = 0x190 movl $1, %edx callq hipMallocManaged movq 16(%rsp), %rax movq 8(%rsp), %rcx movl $100, %edx xorl %esi, %esi movq (%rsp), %rdi .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %esi, %xmm0 movss %xmm0, (%rax,%rsi,4) xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 movss %xmm0, (%rcx,%rsi,4) movl $0, (%rdi,%rsi,4) incq %rsi decq %rdx jne .LBB1_1 # %bb.2: movabsq $4294967297, %rdi # imm = 0x100000001 leaq 99(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z6VecAddPfS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: callq hipDeviceSynchronize xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movq (%rsp), %rax movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl %ebx, %esi movb $1, %al callq printf incq %rbx cmpq $100, %rbx jne .LBB1_5 # %bb.6: movq (%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree xorl %eax, %eax addq $128, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6VecAddPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, 
.Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6VecAddPfS_S_,@object # @_Z6VecAddPfS_S_ .section .rodata,"a",@progbits .globl _Z6VecAddPfS_S_ .p2align 3, 0x0 _Z6VecAddPfS_S_: .quad _Z21__device_stub__VecAddPfS_S_ .size _Z6VecAddPfS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d %f\n" .size .L.str, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6VecAddPfS_S_" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__VecAddPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6VecAddPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda_runtime.h> #include <stdio.h> #include <iostream> using namespace std; __global__ void checkIndex(void) { // printf("- thread idx is : "); printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z ); printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z ); printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z ); printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z); printf("-"); } int main() { int n = 10; dim3 block(3); dim3 grid((n + block.x -1)/block.x); cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl; cout << " block x: " << block.x << " block.y : " << block.y<< " block.z: " << block.z << std::endl; checkIndex<<<grid, block>>>(); cudaDeviceSynchronize(); } int main1(int argc, char** argv) { std::cout << " Starting." << std::endl; int deviceCount = 0; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { cout << "There is no device available." << std::endl; } else { cout << "Detected " << deviceCount << " cuda capable device." << endl; } int dev = 0; cudaDeviceProp deviceProp; cudaSetDevice(dev); cudaGetDeviceProperties(&deviceProp, dev); cout << "Deivce id: " << dev << " " << deviceProp.name << endl; int dversion, runtimeVersion; cudaDriverGetVersion(&dversion); cudaRuntimeGetVersion(&runtimeVersion); cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl; cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl; cudaDeviceReset(); cout << "After device reset." << std::endl; //int dev2 = 2; //cudaSetDevice(dev2); //cudaGetDeviceProperties(&deviceProp, dev2); //cout << "Deivce id: " << dev2 << " " << deviceProp.name << endl; return 1; }
code for sm_80 Function : _Z10checkIndexv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */ /* 0x000e220000002200 */ /*0020*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R2, 0x0 ; /* 0x0000000000027802 */ /* 0x000fe20000000f00 */ /*0050*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */ /* 0x000e220000002100 */ /*0060*/ IADD3 R16, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001107a10 */ /* 0x000fe20007f1e0ff */ /*0070*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*0080*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0002a20000000a00 */ /*0090*/ S2R R0, SR_TID.Z ; /* 0x0000000000007919 */ /* 0x000ee40000002300 */ /*00a0*/ IMAD.X R17, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff117624 */ /* 0x000fc400000e06ff */ /*00b0*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */ /* 0x000fe400078e0010 */ /*00c0*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */ /* 0x000fe200078e0011 */ /*00d0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0013e80000100a00 */ /*00e0*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */ /* 0x0083e40000100800 */ /*00f0*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x006fe40000000000 */ /*0100*/ MOV R3, 0x170 ; /* 0x0000017000037802 */ /* 0x000fe40000000f00 */ /*0110*/ MOV R20, 0xf0 ; /* 0x000000f000147802 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fc40000000f00 */ /*0130*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0140*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*0150*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 
0x000fc800007e250b */ /*0160*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x000fea0003c00000 */ /*0170*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */ /* 0x000e220000002600 */ /*0180*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0002a20000000a00 */ /*0190*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */ /* 0x000fe400078e00ff */ /*01a0*/ S2R R10, SR_CTAID.X ; /* 0x00000000000a7919 */ /* 0x000e220000002500 */ /*01b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */ /* 0x000fe400078e00ff */ /*01c0*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0010 */ /*01d0*/ S2R R0, SR_CTAID.Z ; /* 0x0000000000007919 */ /* 0x000ee20000002700 */ /*01e0*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */ /* 0x000fc600078e0011 */ /*01f0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0013e80000100a00 */ /*0200*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */ /* 0x0083e40000100800 */ /*0210*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x006fe40000000000 */ /*0220*/ MOV R3, 0x290 ; /* 0x0000029000037802 */ /* 0x000fe40000000f00 */ /*0230*/ MOV R20, 0x210 ; /* 0x0000021000147802 */ /* 0x000fe40000000f00 */ /*0240*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0250*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fc40000000f00 */ /*0260*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*0270*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*0280*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x000fea0003c00000 */ /*0290*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */ /* 0x000fe200078e00ff */ /*02a0*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*02b0*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0b7624 */ /* 0x000fe400078e00ff */ /*02c0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x8] ; /* 0x00000200ff007624 */ /* 0x000fe400078e00ff */ /*02d0*/ IMAD.MOV.U32 
R4, RZ, RZ, c[0x4][0x18] ; /* 0x01000600ff047624 */ /* 0x000fe200078e00ff */ /*02e0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0001e20000100a00 */ /*02f0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x1c] ; /* 0x01000700ff057624 */ /* 0x000fe400078e00ff */ /*0300*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0010 */ /*0310*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */ /* 0x0001e20000100800 */ /*0320*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */ /* 0x000fc600078e0011 */ /*0330*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x001fc80000000000 */ /*0340*/ MOV R3, 0x3b0 ; /* 0x000003b000037802 */ /* 0x000fe40000000f00 */ /*0350*/ MOV R20, 0x330 ; /* 0x0000033000147802 */ /* 0x000fc40000000f00 */ /*0360*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0370*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0380*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*0390*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*03a0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x002fea0003c00000 */ /*03b0*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff0a7624 */ /* 0x000fe200078e00ff */ /*03c0*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*03d0*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x10] ; /* 0x00000400ff0b7624 */ /* 0x000fe400078e00ff */ /*03e0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x14] ; /* 0x00000500ff007624 */ /* 0x000fe400078e00ff */ /*03f0*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0010 */ /*0400*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0001e20000100a00 */ /*0410*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */ /* 0x000fe400078e0011 */ /*0420*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x20] ; /* 0x01000800ff047624 */ /* 0x000fe200078e00ff */ /*0430*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */ /* 0x0001e20000100800 */ /*0440*/ IMAD.MOV.U32 R5, RZ, RZ, 
c[0x4][0x24] ; /* 0x01000900ff057624 */ /* 0x000fc600078e00ff */ /*0450*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x001fc80000000000 */ /*0460*/ MOV R3, 0x4d0 ; /* 0x000004d000037802 */ /* 0x000fe40000000f00 */ /*0470*/ MOV R20, 0x450 ; /* 0x0000045000147802 */ /* 0x000fc40000000f00 */ /*0480*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0490*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*04a0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*04b0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*04c0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x002fea0003c00000 */ /*04d0*/ LDC.64 R2, c[0x4][R2] ; /* 0x0100000002027b82 */ /* 0x000e220000000a00 */ /*04e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x28] ; /* 0x01000a00ff047624 */ /* 0x000fe200078e00ff */ /*04f0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */ /* 0x000fe2000001ff00 */ /*0500*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x2c] ; /* 0x01000b00ff057624 */ /* 0x000fca00078e00ff */ /*0510*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x000fe40000000000 */ /*0520*/ MOV R11, 0x590 ; /* 0x00000590000b7802 */ /* 0x000fe40000000f00 */ /*0530*/ MOV R20, 0x510 ; /* 0x0000051000147802 */ /* 0x000fe40000000f00 */ /*0540*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0550*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0560*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*0570*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0580*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x001fea0003c00000 */ /*0590*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*05a0*/ BRA 0x5a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0600*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0610*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0620*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0630*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0640*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0650*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0660*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0670*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda_runtime.h> #include <stdio.h> #include <iostream> using namespace std; __global__ void checkIndex(void) { // printf("- thread idx is : "); printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z ); printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z ); printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z ); printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z); printf("-"); } int main() { int n = 10; dim3 block(3); dim3 grid((n + block.x -1)/block.x); cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl; cout << " block x: " << block.x << " block.y : " << block.y<< " block.z: " << block.z << std::endl; checkIndex<<<grid, block>>>(); cudaDeviceSynchronize(); } int main1(int argc, char** argv) { std::cout << " Starting." << std::endl; int deviceCount = 0; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { cout << "There is no device available." << std::endl; } else { cout << "Detected " << deviceCount << " cuda capable device." << endl; } int dev = 0; cudaDeviceProp deviceProp; cudaSetDevice(dev); cudaGetDeviceProperties(&deviceProp, dev); cout << "Deivce id: " << dev << " " << deviceProp.name << endl; int dversion, runtimeVersion; cudaDriverGetVersion(&dversion); cudaRuntimeGetVersion(&runtimeVersion); cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl; cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl; cudaDeviceReset(); cout << "After device reset." << std::endl; //int dev2 = 2; //cudaSetDevice(dev2); //cudaGetDeviceProperties(&deviceProp, dev2); //cout << "Deivce id: " << dev2 << " " << deviceProp.name << endl; return 1; }
.file "tmpxft_0019e48c_00000000-6_getDeviceInfo.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " Starting." .LC1: .string "There is no device available." .LC2: .string "Detected " .LC3: .string " cuda capable device." .LC4: .string "Deivce id: " .LC5: .string " " .LC6: .string "CUDA driver version : " .LC7: .string " runtime version: " .LC8: .string "Total mem: " .LC9: .string "After device reset." .text .globl _Z5main1iPPc .type _Z5main1iPPc, @function _Z5main1iPPc: .LFB3670: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $1064, %rsp .cfi_def_cfa_offset 1088 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax movl $10, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L36 cmpb $0, 56(%rbx) je .L6 movzbl 67(%rbx), %esi .L7: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, 4(%rsp) leaq 4(%rsp), %rdi call cudaGetDeviceCount@PLT cmpl $0, 4(%rsp) jne .L8 movl $29, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L37 cmpb $0, 56(%rbx) je .L11 movzbl 67(%rbx), %esi .L12: movsbl 
%sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT .L13: movl $0, %edi call cudaSetDevice@PLT leaq 16(%rsp), %rbp movl $0, %esi movq %rbp, %rdi call cudaGetDeviceProperties_v2@PLT movl $11, %edx leaq .LC4(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl $0, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $1, %edx leaq .LC5(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbp, %rdi call strlen@PLT movq %rax, %rdx movq %rbp, %rsi movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L38 cmpb $0, 56(%rbp) je .L20 movzbl 67(%rbp), %esi .L21: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT leaq 8(%rsp), %rdi call cudaDriverGetVersion@PLT leaq 12(%rsp), %rdi call cudaRuntimeGetVersion@PLT movl $22, %edx leaq .LC6(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 8(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $18, %edx leaq .LC7(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 12(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L39 cmpb $0, 56(%rbp) je .L24 movzbl 67(%rbp), %esi .L25: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $11, %edx leaq .LC8(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq 304(%rsp), %rsi shrq $30, %rsi movq 
%rbx, %rdi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L40 cmpb $0, 56(%rbp) je .L28 movzbl 67(%rbp), %esi .L29: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT call cudaDeviceReset@PLT movl $19, %edx leaq .LC9(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L41 cmpb $0, 56(%rbx) je .L32 movzbl 67(%rbx), %esi .L33: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq 1048(%rsp), %rax subq %fs:40, %rax jne .L42 movl $1, %eax addq $1064, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movq 1048(%rsp), %rax subq %fs:40, %rax jne .L43 call _ZSt16__throw_bad_castv@PLT .L43: call __stack_chk_fail@PLT .L6: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L7 .L37: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L44 call _ZSt16__throw_bad_castv@PLT .L44: call __stack_chk_fail@PLT .L11: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L12 .L8: movl $9, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $21, %edx leaq .LC3(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L45 cmpb $0, 56(%rbp) je 
.L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT jmp .L13 .L45: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L46 call _ZSt16__throw_bad_castv@PLT .L46: call __stack_chk_fail@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .L38: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L47 call _ZSt16__throw_bad_castv@PLT .L47: call __stack_chk_fail@PLT .L20: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L21 .L39: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L48 call _ZSt16__throw_bad_castv@PLT .L48: call __stack_chk_fail@PLT .L24: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L25 .L40: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L49 call _ZSt16__throw_bad_castv@PLT .L49: call __stack_chk_fail@PLT .L28: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L29 .L41: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L50 call _ZSt16__throw_bad_castv@PLT .L50: call __stack_chk_fail@PLT .L32: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L33 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE3670: .size _Z5main1iPPc, .-_Z5main1iPPc .globl _Z29__device_stub__Z10checkIndexvv .type _Z29__device_stub__Z10checkIndexvv, @function _Z29__device_stub__Z10checkIndexvv: .LFB3695: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 
28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movq 72(%rsp), %rax subq %fs:40, %rax jne .L56 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L55: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10checkIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L51 .L56: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z29__device_stub__Z10checkIndexvv, .-_Z29__device_stub__Z10checkIndexvv .globl _Z10checkIndexv .type _Z10checkIndexv, @function _Z10checkIndexv: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10checkIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z10checkIndexv, .-_Z10checkIndexv .section .rodata.str1.1 .LC10: .string " grid x: " .LC11: .string " grid.y: " .LC12: .string " grid.z : " .LC13: .string " block x: " .LC14: .string " block.y : " .LC15: .string " block.z: " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $32, %rsp .cfi_def_cfa_offset 48 leaq .LC10(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $4, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC11(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC12(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC13(%rip), %rsi movq %rbx, %rdi call 
_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $3, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC14(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC15(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $4, 20(%rsp) movl $1, 24(%rsp) movl $3, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L62 .L60: call cudaDeviceSynchronize@PLT movl $0, %eax addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L62: .cfi_restore_state call _Z29__device_stub__Z10checkIndexvv jmp .L60 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC16: .string "_Z10checkIndexv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC16(%rip), %rdx movq %rdx, %rcx leaq _Z10checkIndexv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad 
_ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda_runtime.h> #include <stdio.h> #include <iostream> using namespace std; __global__ void checkIndex(void) { // printf("- thread idx is : "); printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z ); printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z ); printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z ); printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z); printf("-"); } int main() { int n = 10; dim3 block(3); dim3 grid((n + block.x -1)/block.x); cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl; cout << " block x: " << block.x << " block.y : " << block.y<< " block.z: " << block.z << std::endl; checkIndex<<<grid, block>>>(); cudaDeviceSynchronize(); } int main1(int argc, char** argv) { std::cout << " Starting." << std::endl; int deviceCount = 0; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { cout << "There is no device available." << std::endl; } else { cout << "Detected " << deviceCount << " cuda capable device." << endl; } int dev = 0; cudaDeviceProp deviceProp; cudaSetDevice(dev); cudaGetDeviceProperties(&deviceProp, dev); cout << "Deivce id: " << dev << " " << deviceProp.name << endl; int dversion, runtimeVersion; cudaDriverGetVersion(&dversion); cudaRuntimeGetVersion(&runtimeVersion); cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl; cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl; cudaDeviceReset(); cout << "After device reset." << std::endl; //int dev2 = 2; //cudaSetDevice(dev2); //cudaGetDeviceProperties(&deviceProp, dev2); //cout << "Deivce id: " << dev2 << " " << deviceProp.name << endl; return 1; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> using namespace std; __global__ void checkIndex(void) { // printf("- thread idx is : "); printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z ); printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z ); printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z ); printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z); printf("-"); } int main() { int n = 10; dim3 block(3); dim3 grid((n + block.x -1)/block.x); cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl; cout << " block x: " << block.x << " block.y : " << block.y<< " block.z: " << block.z << std::endl; checkIndex<<<grid, block>>>(); hipDeviceSynchronize(); } int main1(int argc, char** argv) { std::cout << " Starting." << std::endl; int deviceCount = 0; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { cout << "There is no device available." << std::endl; } else { cout << "Detected " << deviceCount << " cuda capable device." << endl; } int dev = 0; hipDeviceProp_t deviceProp; hipSetDevice(dev); hipGetDeviceProperties(&deviceProp, dev); cout << "Deivce id: " << dev << " " << deviceProp.name << endl; int dversion, runtimeVersion; hipDriverGetVersion(&dversion); hipRuntimeGetVersion(&runtimeVersion); cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl; cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl; hipDeviceReset(); cout << "After device reset." << std::endl; //int dev2 = 2; //cudaSetDevice(dev2); //cudaGetDeviceProperties(&deviceProp, dev2); //cout << "Deivce id: " << dev2 << " " << deviceProp.name << endl; return 1; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> using namespace std; __global__ void checkIndex(void) { // printf("- thread idx is : "); printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z ); printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z ); printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z ); printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z); printf("-"); } int main() { int n = 10; dim3 block(3); dim3 grid((n + block.x -1)/block.x); cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl; cout << " block x: " << block.x << " block.y : " << block.y<< " block.z: " << block.z << std::endl; checkIndex<<<grid, block>>>(); hipDeviceSynchronize(); } int main1(int argc, char** argv) { std::cout << " Starting." << std::endl; int deviceCount = 0; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { cout << "There is no device available." << std::endl; } else { cout << "Detected " << deviceCount << " cuda capable device." << endl; } int dev = 0; hipDeviceProp_t deviceProp; hipSetDevice(dev); hipGetDeviceProperties(&deviceProp, dev); cout << "Deivce id: " << dev << " " << deviceProp.name << endl; int dversion, runtimeVersion; hipDriverGetVersion(&dversion); hipRuntimeGetVersion(&runtimeVersion); cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl; cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl; hipDeviceReset(); cout << "After device reset." << std::endl; //int dev2 = 2; //cudaSetDevice(dev2); //cudaGetDeviceProperties(&deviceProp, dev2); //cout << "Deivce id: " << dev2 << " " << deviceProp.name << endl; return 1; }
.text .file "getDeviceInfo.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z25__device_stub__checkIndexv # -- Begin function _Z25__device_stub__checkIndexv .p2align 4, 0x90 .type _Z25__device_stub__checkIndexv,@function _Z25__device_stub__checkIndexv: # @_Z25__device_stub__checkIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z25__device_stub__checkIndexv, .Lfunc_end0-_Z25__device_stub__checkIndexv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $56, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $_ZSt4cout, %edi movl $.L.str, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl $4, %esi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.1, %esi movl $9, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.2, %esi movl $10, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_11 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je 
.LBB1_3 # %bb.2: movzbl 67(%rbx), %ecx jmp .LBB1_4 .LBB1_3: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl $3, %esi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.4, %esi movl $11, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.5, %esi movl $10, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_11 # %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9 cmpb $0, 56(%rbx) je .LBB1_7 # %bb.6: movzbl 67(%rbx), %ecx jmp .LBB1_8 .LBB1_7: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movabsq $4294967299, %rdx # imm = 0x100000003 leaq 1(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_10 # %bb.9: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 
movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_10: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB1_11: .cfi_def_cfa_offset 80 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z5main1iPPc # -- Begin function _Z5main1iPPc .p2align 4, 0x90 .type _Z5main1iPPc,@function _Z5main1iPPc: # @_Z5main1iPPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $1496, %rsp # imm = 0x5D8 .cfi_def_cfa_offset 1520 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $_ZSt4cout, %edi movl $.L.str.6, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB2_3 # %bb.2: movzbl 67(%rbx), %eax jmp .LBB2_4 .LBB2_3: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $0, 12(%rsp) leaq 12(%rsp), %rdi callq hipGetDeviceCount cmpl $0, 12(%rsp) je .LBB2_5 # %bb.10: movl $_ZSt4cout, %edi movl $.L.str.8, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 12(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.9, %esi movl $21, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, 
%r14 je .LBB2_32 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9 cmpb $0, 56(%r14) je .LBB2_13 # %bb.12: movzbl 67(%r14), %eax jmp .LBB2_14 .LBB2_5: movl $_ZSt4cout, %edi movl $.L.str.7, %esi movl $29, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i4 cmpb $0, 56(%rbx) je .LBB2_8 # %bb.7: movzbl 67(%rbx), %eax jmp .LBB2_9 .LBB2_13: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12 movsbl %al, %esi movq %rbx, %rdi jmp .LBB2_15 .LBB2_8: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit7 movsbl %al, %esi movl $_ZSt4cout, %edi .LBB2_15: callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %edi, %edi callq hipSetDevice leaq 24(%rsp), %r14 movq %r14, %rdi xorl %esi, %esi callq hipGetDevicePropertiesR0600 movl $_ZSt4cout, %edi movl $.L.str.10, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi xorl %esi, %esi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.11, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r14, %rdi callq strlen movq %rbx, %rdi movq %r14, %rsi movq %rax, %rdx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_32 # %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i14 cmpb $0, 56(%r14) je .LBB2_18 # %bb.17: movzbl 67(%r14), %eax jmp .LBB2_19 .LBB2_18: movq %r14, 
%rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit17 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv leaq 20(%rsp), %rdi callq hipDriverGetVersion leaq 16(%rsp), %rdi callq hipRuntimeGetVersion movl $_ZSt4cout, %edi movl $.L.str.12, %esi movl $22, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 20(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.13, %esi movl $18, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 16(%rsp), %esi movq %rbx, %rdi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.20: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i19 cmpb $0, 56(%rbx) je .LBB2_22 # %bb.21: movzbl 67(%rbx), %ecx jmp .LBB2_23 .LBB2_22: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB2_23: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit22 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.14, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq 312(%rsp), %rsi shrq $30, %rsi movl $_ZSt4cout, %edi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.24: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i24 cmpb $0, 56(%rbx) je .LBB2_26 # %bb.25: movzbl 67(%rbx), %ecx jmp .LBB2_27 .LBB2_26: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax 
.LBB2_27: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit27 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv callq hipDeviceReset movl $_ZSt4cout, %edi movl $.L.str.15, %esi movl $19, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.28: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i29 cmpb $0, 56(%rbx) je .LBB2_30 # %bb.29: movzbl 67(%rbx), %eax jmp .LBB2_31 .LBB2_30: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_31: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit32 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %eax addq $1496, %rsp # imm = 0x5D8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB2_32: .cfi_def_cfa_offset 1520 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size _Z5main1iPPc, .Lfunc_end2-_Z5main1iPPc .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10checkIndexv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z10checkIndexv,@object # @_Z10checkIndexv .section .rodata,"a",@progbits .globl _Z10checkIndexv .p2align 3, 0x0 _Z10checkIndexv: .quad _Z25__device_stub__checkIndexv .size _Z10checkIndexv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " grid x: " .size .L.str, 10 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " grid.y: " .size .L.str.1, 10 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " grid.z : " .size .L.str.2, 11 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " block x: " .size .L.str.3, 11 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " block.y : " .size .L.str.4, 12 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz " block.z: " .size .L.str.5, 11 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz " Starting." .size .L.str.6, 11 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "There is no device available." .size .L.str.7, 30 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Detected " .size .L.str.8, 10 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " cuda capable device." 
.size .L.str.9, 22 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Deivce id: " .size .L.str.10, 12 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz " " .size .L.str.11, 2 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "CUDA driver version : " .size .L.str.12, 23 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz " runtime version: " .size .L.str.13, 19 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "Total mem: " .size .L.str.14, 12 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "After device reset." .size .L.str.15, 20 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10checkIndexv" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__checkIndexv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10checkIndexv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0019e48c_00000000-6_getDeviceInfo.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " Starting." .LC1: .string "There is no device available." .LC2: .string "Detected " .LC3: .string " cuda capable device." .LC4: .string "Deivce id: " .LC5: .string " " .LC6: .string "CUDA driver version : " .LC7: .string " runtime version: " .LC8: .string "Total mem: " .LC9: .string "After device reset." .text .globl _Z5main1iPPc .type _Z5main1iPPc, @function _Z5main1iPPc: .LFB3670: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $1064, %rsp .cfi_def_cfa_offset 1088 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax movl $10, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L36 cmpb $0, 56(%rbx) je .L6 movzbl 67(%rbx), %esi .L7: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $0, 4(%rsp) leaq 4(%rsp), %rdi call cudaGetDeviceCount@PLT cmpl $0, 4(%rsp) jne .L8 movl $29, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L37 cmpb $0, 56(%rbx) je .L11 movzbl 67(%rbx), %esi .L12: movsbl 
%sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT .L13: movl $0, %edi call cudaSetDevice@PLT leaq 16(%rsp), %rbp movl $0, %esi movq %rbp, %rdi call cudaGetDeviceProperties_v2@PLT movl $11, %edx leaq .LC4(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl $0, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $1, %edx leaq .LC5(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbp, %rdi call strlen@PLT movq %rax, %rdx movq %rbp, %rsi movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L38 cmpb $0, 56(%rbp) je .L20 movzbl 67(%rbp), %esi .L21: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT leaq 8(%rsp), %rdi call cudaDriverGetVersion@PLT leaq 12(%rsp), %rdi call cudaRuntimeGetVersion@PLT movl $22, %edx leaq .LC6(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 8(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $18, %edx leaq .LC7(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 12(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L39 cmpb $0, 56(%rbp) je .L24 movzbl 67(%rbp), %esi .L25: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $11, %edx leaq .LC8(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq 304(%rsp), %rsi shrq $30, %rsi movq 
%rbx, %rdi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L40 cmpb $0, 56(%rbp) je .L28 movzbl 67(%rbp), %esi .L29: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT call cudaDeviceReset@PLT movl $19, %edx leaq .LC9(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L41 cmpb $0, 56(%rbx) je .L32 movzbl 67(%rbx), %esi .L33: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq 1048(%rsp), %rax subq %fs:40, %rax jne .L42 movl $1, %eax addq $1064, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movq 1048(%rsp), %rax subq %fs:40, %rax jne .L43 call _ZSt16__throw_bad_castv@PLT .L43: call __stack_chk_fail@PLT .L6: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L7 .L37: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L44 call _ZSt16__throw_bad_castv@PLT .L44: call __stack_chk_fail@PLT .L11: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L12 .L8: movl $9, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $21, %edx leaq .LC3(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L45 cmpb $0, 56(%rbp) je 
.L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT jmp .L13 .L45: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L46 call _ZSt16__throw_bad_castv@PLT .L46: call __stack_chk_fail@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .L38: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L47 call _ZSt16__throw_bad_castv@PLT .L47: call __stack_chk_fail@PLT .L20: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L21 .L39: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L48 call _ZSt16__throw_bad_castv@PLT .L48: call __stack_chk_fail@PLT .L24: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L25 .L40: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L49 call _ZSt16__throw_bad_castv@PLT .L49: call __stack_chk_fail@PLT .L28: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L29 .L41: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L50 call _ZSt16__throw_bad_castv@PLT .L50: call __stack_chk_fail@PLT .L32: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L33 .L42: call __stack_chk_fail@PLT .cfi_endproc .LFE3670: .size _Z5main1iPPc, .-_Z5main1iPPc .globl _Z29__device_stub__Z10checkIndexvv .type _Z29__device_stub__Z10checkIndexvv, @function _Z29__device_stub__Z10checkIndexvv: .LFB3695: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 
28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movq 72(%rsp), %rax subq %fs:40, %rax jne .L56 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L55: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10checkIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L51 .L56: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z29__device_stub__Z10checkIndexvv, .-_Z29__device_stub__Z10checkIndexvv .globl _Z10checkIndexv .type _Z10checkIndexv, @function _Z10checkIndexv: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10checkIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z10checkIndexv, .-_Z10checkIndexv .section .rodata.str1.1 .LC10: .string " grid x: " .LC11: .string " grid.y: " .LC12: .string " grid.z : " .LC13: .string " block x: " .LC14: .string " block.y : " .LC15: .string " block.z: " .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $32, %rsp .cfi_def_cfa_offset 48 leaq .LC10(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $4, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC11(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC12(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC13(%rip), %rsi movq %rbx, %rdi call 
_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $3, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC14(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC15(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $4, 20(%rsp) movl $1, 24(%rsp) movl $3, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L62 .L60: call cudaDeviceSynchronize@PLT movl $0, %eax addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L62: .cfi_restore_state call _Z29__device_stub__Z10checkIndexvv jmp .L60 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC16: .string "_Z10checkIndexv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC16(%rip), %rdx movq %rdx, %rcx leaq _Z10checkIndexv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad 
_ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "getDeviceInfo.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z25__device_stub__checkIndexv # -- Begin function _Z25__device_stub__checkIndexv .p2align 4, 0x90 .type _Z25__device_stub__checkIndexv,@function _Z25__device_stub__checkIndexv: # @_Z25__device_stub__checkIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z25__device_stub__checkIndexv, .Lfunc_end0-_Z25__device_stub__checkIndexv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $56, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $_ZSt4cout, %edi movl $.L.str, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl $4, %esi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.1, %esi movl $9, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.2, %esi movl $10, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_11 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je 
.LBB1_3 # %bb.2: movzbl 67(%rbx), %ecx jmp .LBB1_4 .LBB1_3: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl $3, %esi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.4, %esi movl $11, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq %rax, %rbx movl $.L.str.5, %esi movl $10, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $1, %esi movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_11 # %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9 cmpb $0, 56(%rbx) je .LBB1_7 # %bb.6: movzbl 67(%rbx), %ecx jmp .LBB1_8 .LBB1_7: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movabsq $4294967299, %rdx # imm = 0x100000003 leaq 1(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_10 # %bb.9: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 
movl $_Z10checkIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_10: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB1_11: .cfi_def_cfa_offset 80 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z5main1iPPc # -- Begin function _Z5main1iPPc .p2align 4, 0x90 .type _Z5main1iPPc,@function _Z5main1iPPc: # @_Z5main1iPPc .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $1496, %rsp # imm = 0x5D8 .cfi_def_cfa_offset 1520 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $_ZSt4cout, %edi movl $.L.str.6, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB2_3 # %bb.2: movzbl 67(%rbx), %eax jmp .LBB2_4 .LBB2_3: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $0, 12(%rsp) leaq 12(%rsp), %rdi callq hipGetDeviceCount cmpl $0, 12(%rsp) je .LBB2_5 # %bb.10: movl $_ZSt4cout, %edi movl $.L.str.8, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 12(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.9, %esi movl $21, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, 
%r14 je .LBB2_32 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9 cmpb $0, 56(%r14) je .LBB2_13 # %bb.12: movzbl 67(%r14), %eax jmp .LBB2_14 .LBB2_5: movl $_ZSt4cout, %edi movl $.L.str.7, %esi movl $29, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i4 cmpb $0, 56(%rbx) je .LBB2_8 # %bb.7: movzbl 67(%rbx), %eax jmp .LBB2_9 .LBB2_13: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12 movsbl %al, %esi movq %rbx, %rdi jmp .LBB2_15 .LBB2_8: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit7 movsbl %al, %esi movl $_ZSt4cout, %edi .LBB2_15: callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %edi, %edi callq hipSetDevice leaq 24(%rsp), %r14 movq %r14, %rdi xorl %esi, %esi callq hipGetDevicePropertiesR0600 movl $_ZSt4cout, %edi movl $.L.str.10, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi xorl %esi, %esi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.11, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r14, %rdi callq strlen movq %rbx, %rdi movq %r14, %rsi movq %rax, %rdx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_32 # %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i14 cmpb $0, 56(%r14) je .LBB2_18 # %bb.17: movzbl 67(%r14), %eax jmp .LBB2_19 .LBB2_18: movq %r14, 
%rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit17 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv leaq 20(%rsp), %rdi callq hipDriverGetVersion leaq 16(%rsp), %rdi callq hipRuntimeGetVersion movl $_ZSt4cout, %edi movl $.L.str.12, %esi movl $22, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 20(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.13, %esi movl $18, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 16(%rsp), %esi movq %rbx, %rdi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.20: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i19 cmpb $0, 56(%rbx) je .LBB2_22 # %bb.21: movzbl 67(%rbx), %ecx jmp .LBB2_23 .LBB2_22: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB2_23: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit22 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.14, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq 312(%rsp), %rsi shrq $30, %rsi movl $_ZSt4cout, %edi callq _ZNSo9_M_insertImEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.24: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i24 cmpb $0, 56(%rbx) je .LBB2_26 # %bb.25: movzbl 67(%rbx), %ecx jmp .LBB2_27 .LBB2_26: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax 
.LBB2_27: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit27 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv callq hipDeviceReset movl $_ZSt4cout, %edi movl $.L.str.15, %esi movl $19, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_32 # %bb.28: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i29 cmpb $0, 56(%rbx) je .LBB2_30 # %bb.29: movzbl 67(%rbx), %eax jmp .LBB2_31 .LBB2_30: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_31: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit32 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %eax addq $1496, %rsp # imm = 0x5D8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB2_32: .cfi_def_cfa_offset 1520 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size _Z5main1iPPc, .Lfunc_end2-_Z5main1iPPc .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10checkIndexv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z10checkIndexv,@object # @_Z10checkIndexv .section .rodata,"a",@progbits .globl _Z10checkIndexv .p2align 3, 0x0 _Z10checkIndexv: .quad _Z25__device_stub__checkIndexv .size _Z10checkIndexv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " grid x: " .size .L.str, 10 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " grid.y: " .size .L.str.1, 10 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " grid.z : " .size .L.str.2, 11 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " block x: " .size .L.str.3, 11 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " block.y : " .size .L.str.4, 12 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz " block.z: " .size .L.str.5, 11 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz " Starting." .size .L.str.6, 11 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "There is no device available." .size .L.str.7, 30 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Detected " .size .L.str.8, 10 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " cuda capable device." 
.size .L.str.9, 22 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Deivce id: " .size .L.str.10, 12 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz " " .size .L.str.11, 2 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "CUDA driver version : " .size .L.str.12, 23 .type .L.str.13,@object # @.str.13 .L.str.13: .asciz " runtime version: " .size .L.str.13, 19 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "Total mem: " .size .L.str.14, 12 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "After device reset." .size .L.str.15, 20 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10checkIndexv" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__checkIndexv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10checkIndexv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
code for sm_80 Function : _Z12UpdateSecondPfS_fS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */ /* 0x001fca00078e0206 */ /*0060*/ IMAD.WIDE R2, R6, R11, c[0x0][0x178] ; /* 0x00005e0006027625 */ /* 0x000fc800078e020b */ /*0070*/ IMAD.WIDE R4, R6, R11, c[0x0][0x160] ; /* 0x0000580006047625 */ /* 0x000fe200078e020b */ /*0080*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea4000c1e1900 */ /*00a0*/ FMUL R9, R0, R7 ; /* 0x0000000700097220 */ /* 0x004fe40000400000 */ /*00b0*/ IMAD.WIDE R6, R6, R11, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fc600078e020b */ /*00c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe8000c101904 */ /*00d0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ HFMA2.MMA R13, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff0d7435 */ /* 0x000fe200000001ff */ /*00f0*/ FFMA R11, R6, c[0x0][0x170], R9 ; /* 0x00005c00060b7a23 */ /* 0x004fca0000000009 */ /*0100*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x000fe8000c101904 */ /*0110*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x000fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
.file "tmpxft_00048e4c_00000000-6_UpdateSecond.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ .type _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_, @function _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movss %xmm0, 12(%rsp) movq %rdx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12UpdateSecondPfS_fS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_, .-_Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ .globl _Z12UpdateSecondPfS_fS_ .type _Z12UpdateSecondPfS_fS_, @function _Z12UpdateSecondPfS_fS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12UpdateSecondPfS_fS_, .-_Z12UpdateSecondPfS_fS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12UpdateSecondPfS_fS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12UpdateSecondPfS_fS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12UpdateSecondPfS_fS_ .globl _Z12UpdateSecondPfS_fS_ .p2align 8 .type _Z12UpdateSecondPfS_fS_,@function _Z12UpdateSecondPfS_fS_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x2c s_load_b64 s[2:3], s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b32 s0, s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 global_load_b32 v6, v[2:3], off global_load_b32 v7, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_waitcnt vmcnt(0) v_mul_f32_e32 v6, v6, v7 global_store_b32 v[4:5], v6, off global_load_b32 v0, v[0:1], off s_waitcnt vmcnt(0) v_fmac_f32_e32 v6, s0, v0 v_mov_b32_e32 v0, 1.0 global_store_b32 v[4:5], v6, off global_store_b32 v[2:3], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12UpdateSecondPfS_fS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 
.amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12UpdateSecondPfS_fS_, .Lfunc_end0-_Z12UpdateSecondPfS_fS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12UpdateSecondPfS_fS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12UpdateSecondPfS_fS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT) { int idx = threadIdx.x + blockIdx.x * blockDim.x; WHAT[idx] *=MULT[idx]; WHAT[idx] +=AMOUNT*WITH[idx]; MULT[idx] = 1.0f; }
.text .file "UpdateSecond.hip" .globl _Z27__device_stub__UpdateSecondPfS_fS_ # -- Begin function _Z27__device_stub__UpdateSecondPfS_fS_ .p2align 4, 0x90 .type _Z27__device_stub__UpdateSecondPfS_fS_,@function _Z27__device_stub__UpdateSecondPfS_fS_: # @_Z27__device_stub__UpdateSecondPfS_fS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movss %xmm0, 4(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12UpdateSecondPfS_fS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__UpdateSecondPfS_fS_, .Lfunc_end0-_Z27__device_stub__UpdateSecondPfS_fS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12UpdateSecondPfS_fS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12UpdateSecondPfS_fS_,@object # @_Z12UpdateSecondPfS_fS_ .section .rodata,"a",@progbits .globl _Z12UpdateSecondPfS_fS_ .p2align 3, 0x0 _Z12UpdateSecondPfS_fS_: .quad _Z27__device_stub__UpdateSecondPfS_fS_ .size _Z12UpdateSecondPfS_fS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12UpdateSecondPfS_fS_" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__UpdateSecondPfS_fS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12UpdateSecondPfS_fS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12UpdateSecondPfS_fS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */ /* 0x001fca00078e0206 */ /*0060*/ IMAD.WIDE R2, R6, R11, c[0x0][0x178] ; /* 0x00005e0006027625 */ /* 0x000fc800078e020b */ /*0070*/ IMAD.WIDE R4, R6, R11, c[0x0][0x160] ; /* 0x0000580006047625 */ /* 0x000fe200078e020b */ /*0080*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea4000c1e1900 */ /*00a0*/ FMUL R9, R0, R7 ; /* 0x0000000700097220 */ /* 0x004fe40000400000 */ /*00b0*/ IMAD.WIDE R6, R6, R11, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fc600078e020b */ /*00c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe8000c101904 */ /*00d0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ HFMA2.MMA R13, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff0d7435 */ /* 0x000fe200000001ff */ /*00f0*/ FFMA R11, R6, c[0x0][0x170], R9 ; /* 0x00005c00060b7a23 */ /* 0x004fca0000000009 */ /*0100*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x000fe8000c101904 */ /*0110*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x000fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12UpdateSecondPfS_fS_ .globl _Z12UpdateSecondPfS_fS_ .p2align 8 .type _Z12UpdateSecondPfS_fS_,@function _Z12UpdateSecondPfS_fS_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x2c s_load_b64 s[2:3], s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b32 s0, s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 global_load_b32 v6, v[2:3], off global_load_b32 v7, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo s_waitcnt vmcnt(0) v_mul_f32_e32 v6, v6, v7 global_store_b32 v[4:5], v6, off global_load_b32 v0, v[0:1], off s_waitcnt vmcnt(0) v_fmac_f32_e32 v6, s0, v0 v_mov_b32_e32 v0, 1.0 global_store_b32 v[4:5], v6, off global_store_b32 v[2:3], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12UpdateSecondPfS_fS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 
.amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12UpdateSecondPfS_fS_, .Lfunc_end0-_Z12UpdateSecondPfS_fS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12UpdateSecondPfS_fS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12UpdateSecondPfS_fS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00048e4c_00000000-6_UpdateSecond.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ .type _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_, @function _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movss %xmm0, 12(%rsp) movq %rdx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12UpdateSecondPfS_fS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_, .-_Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ .globl _Z12UpdateSecondPfS_fS_ .type _Z12UpdateSecondPfS_fS_, @function _Z12UpdateSecondPfS_fS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z37__device_stub__Z12UpdateSecondPfS_fS_PfS_fS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12UpdateSecondPfS_fS_, .-_Z12UpdateSecondPfS_fS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12UpdateSecondPfS_fS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12UpdateSecondPfS_fS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "UpdateSecond.hip" .globl _Z27__device_stub__UpdateSecondPfS_fS_ # -- Begin function _Z27__device_stub__UpdateSecondPfS_fS_ .p2align 4, 0x90 .type _Z27__device_stub__UpdateSecondPfS_fS_,@function _Z27__device_stub__UpdateSecondPfS_fS_: # @_Z27__device_stub__UpdateSecondPfS_fS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movss %xmm0, 4(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12UpdateSecondPfS_fS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__UpdateSecondPfS_fS_, .Lfunc_end0-_Z27__device_stub__UpdateSecondPfS_fS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12UpdateSecondPfS_fS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type 
__hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12UpdateSecondPfS_fS_,@object # @_Z12UpdateSecondPfS_fS_ .section .rodata,"a",@progbits .globl _Z12UpdateSecondPfS_fS_ .p2align 3, 0x0 _Z12UpdateSecondPfS_fS_: .quad _Z27__device_stub__UpdateSecondPfS_fS_ .size _Z12UpdateSecondPfS_fS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12UpdateSecondPfS_fS_" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__UpdateSecondPfS_fS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12UpdateSecondPfS_fS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <unistd.h> #include "cuda.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ){ std::cout << "Error: cudaMemGetInfo fails, " << cudaGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <unistd.h> #include "cuda.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ){ std::cout << "Error: cudaMemGetInfo fails, " << cudaGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
.file "tmpxft_000bb51d_00000000-6_test.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3685: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3685: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error: cudaMemGetInfo fails, " .LC1: .string "GPU memory usage: used = " .LC3: .string ", free = " .LC4: .string " MB, total = " .LC5: .string " MB" .text .globl main .type main, @function main: .LFB3682: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq .LC1(%rip), %r13 leaq _ZSt4cout(%rip), %rbp leaq .LC3(%rip), %r12 jmp .L13 .L17: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rbp movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $1, %edi call exit@PLT .L5: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 addsd %xmm0, %xmm0 movsd %xmm0, (%rsp) jmp .L6 .L7: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 addsd %xmm0, %xmm0 movsd %xmm0, 8(%rsp) jmp .L8 .L18: movq 40(%rsp), %rax subq %fs:40, %rax jne .L16 call _ZSt16__throw_bad_castv@PLT .L16: call 
__stack_chk_fail@PLT .L11: movq %r14, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r14), %rax movl $10, %esi movq %r14, %rdi call *48(%rax) movl %eax, %esi .L12: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $1, %edi call sleep@PLT .L13: leaq 32(%rsp), %rsi leaq 24(%rsp), %rdi call cudaMemGetInfo@PLT movl %eax, %ebx testl %eax, %eax jne .L17 movq 24(%rsp), %rax testq %rax, %rax js .L5 pxor %xmm4, %xmm4 cvtsi2sdq %rax, %xmm4 movsd %xmm4, (%rsp) .L6: movq 32(%rsp), %rax testq %rax, %rax js .L7 pxor %xmm5, %xmm5 cvtsi2sdq %rax, %xmm5 movsd %xmm5, 8(%rsp) .L8: movsd 8(%rsp), %xmm1 subsd (%rsp), %xmm1 movq %xmm1, %rbx movl $25, %edx movq %r13, %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbx, %xmm3 mulsd .LC2(%rip), %xmm3 movapd %xmm3, %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $9, %edx movq %r12, %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movsd (%rsp), %xmm2 mulsd .LC2(%rip), %xmm2 movapd %xmm2, %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $13, %edx leaq .LC4(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movsd 8(%rsp), %xmm0 mulsd .LC2(%rip), %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $3, %edx leaq .LC5(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .L18 cmpb $0, 56(%r14) je .L11 movzbl 67(%r14), %esi jmp .L12 .cfi_endproc .LFE3682: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3708: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3708: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long 0 .long 1062207488 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <unistd.h> #include "cuda.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ){ std::cout << "Error: cudaMemGetInfo fails, " << cudaGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
#include <iostream> #include <unistd.h> #include "hip/hip_runtime.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ){ std::cout << "Error: hipMemGetInfo fails, " << hipGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <iostream> #include <unistd.h> #include "hip/hip_runtime.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ){ std::cout << "Error: hipMemGetInfo fails, " << hipGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <iostream> #include <unistd.h> #include "hip/hip_runtime.h" int main() { // show memory usage of GPU size_t free_byte ; size_t total_byte ; while (true ) { hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ){ std::cout << "Error: hipMemGetInfo fails, " << hipGetErrorString(cuda_status) << std::endl; exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0 << ", free = " << free_db/1024.0/1024.0 << " MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl; sleep(1); } return 0; }
.text .file "test.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI0_0: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI0_1: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI0_2: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $72, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 leaq 8(%rsp), %rdi movq %rsp, %rsi callq hipMemGetInfo testl %eax, %eax jne .LBB0_7 # %bb.1: leaq 8(%rsp), %rbx movq %rsp, %r14 jmp .LBB0_2 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_2 Depth=1 movq %r12, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) .LBB0_6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB0_2 Depth=1 movsbl %al, %esi movq %r15, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %edi callq sleep movq %rbx, %rdi movq %r14, %rsi callq hipMemGetInfo testl %eax, %eax jne .LBB0_7 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd 8(%rsp), %xmm0 # xmm0 = mem[0],zero movapd .LCPI0_0(%rip), %xmm1 # xmm1 = [1127219200,1160773632,0,0] unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movapd .LCPI0_1(%rip), %xmm2 # xmm2 = [4.503599627370496E+15,1.9342813113834067E+25] subpd %xmm2, %xmm0 movapd %xmm0, %xmm3 unpckhpd %xmm0, %xmm3 # xmm3 = xmm3[1],xmm0[1] addsd %xmm0, %xmm3 movapd %xmm3, 32(%rsp) # 16-byte Spill movsd (%rsp), %xmm0 # 
xmm0 = mem[0],zero unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] subpd %xmm2, %xmm0 movapd %xmm0, %xmm1 unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1] addsd %xmm0, %xmm1 movapd %xmm1, 48(%rsp) # 16-byte Spill subsd %xmm3, %xmm1 movapd %xmm1, 16(%rsp) # 16-byte Spill movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $25, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movsd .LCPI0_2(%rip), %xmm0 # xmm0 = mem[0],zero movapd 16(%rsp), %xmm1 # 16-byte Reload mulsd %xmm0, %xmm1 mulsd %xmm0, %xmm1 movapd %xmm1, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.2, %esi movl $9, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movapd 32(%rsp), %xmm0 # 16-byte Reload movsd .LCPI0_2(%rip), %xmm1 # xmm1 = mem[0],zero mulsd %xmm1, %xmm0 mulsd %xmm1, %xmm0 movq %r15, %rdi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.3, %esi movl $13, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movapd 48(%rsp), %xmm0 # 16-byte Reload movsd .LCPI0_2(%rip), %xmm1 # xmm1 = mem[0],zero mulsd %xmm1, %xmm0 mulsd %xmm1, %xmm0 movq %r15, %rdi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.4, %esi movl $3, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r15), %rax movq -24(%rax), %rax movq 240(%r15,%rax), %r12 testq %r12, %r12 je .LBB0_8 # %bb.3: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB0_2 Depth=1 cmpb $0, 56(%r12) je .LBB0_5 # %bb.4: # in Loop: Header=BB0_2 Depth=1 movzbl 67(%r12), %eax jmp .LBB0_6 .LBB0_7: # %._crit_edge movl $_ZSt4cout, %edi movl $.L.str, %esi movl %eax, %ebx callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %r14 movl %ebx, %edi callq hipGetErrorString movq %r14, %rdi movq %rax, %rsi callq 
_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $1, %edi callq exit .LBB0_8: callq _ZSt16__throw_bad_castv .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error: hipMemGetInfo fails, " .size .L.str, 29 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "GPU memory usage: used = " .size .L.str.1, 26 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz ", free = " .size .L.str.2, 10 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " MB, total = " .size .L.str.3, 14 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " MB" .size .L.str.4, 4 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000bb51d_00000000-6_test.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3685: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3685: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error: cudaMemGetInfo fails, " .LC1: .string "GPU memory usage: used = " .LC3: .string ", free = " .LC4: .string " MB, total = " .LC5: .string " MB" .text .globl main .type main, @function main: .LFB3682: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq .LC1(%rip), %r13 leaq _ZSt4cout(%rip), %rbp leaq .LC3(%rip), %r12 jmp .L13 .L17: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rbp movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $1, %edi call exit@PLT .L5: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 addsd %xmm0, %xmm0 movsd %xmm0, (%rsp) jmp .L6 .L7: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 addsd %xmm0, %xmm0 movsd %xmm0, 8(%rsp) jmp .L8 .L18: movq 40(%rsp), %rax subq %fs:40, %rax jne .L16 call _ZSt16__throw_bad_castv@PLT .L16: call 
__stack_chk_fail@PLT .L11: movq %r14, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r14), %rax movl $10, %esi movq %r14, %rdi call *48(%rax) movl %eax, %esi .L12: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $1, %edi call sleep@PLT .L13: leaq 32(%rsp), %rsi leaq 24(%rsp), %rdi call cudaMemGetInfo@PLT movl %eax, %ebx testl %eax, %eax jne .L17 movq 24(%rsp), %rax testq %rax, %rax js .L5 pxor %xmm4, %xmm4 cvtsi2sdq %rax, %xmm4 movsd %xmm4, (%rsp) .L6: movq 32(%rsp), %rax testq %rax, %rax js .L7 pxor %xmm5, %xmm5 cvtsi2sdq %rax, %xmm5 movsd %xmm5, 8(%rsp) .L8: movsd 8(%rsp), %xmm1 subsd (%rsp), %xmm1 movq %xmm1, %rbx movl $25, %edx movq %r13, %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbx, %xmm3 mulsd .LC2(%rip), %xmm3 movapd %xmm3, %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $9, %edx movq %r12, %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movsd (%rsp), %xmm2 mulsd .LC2(%rip), %xmm2 movapd %xmm2, %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $13, %edx leaq .LC4(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movsd 8(%rsp), %xmm0 mulsd .LC2(%rip), %xmm0 mulsd .LC2(%rip), %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movl $3, %edx leaq .LC5(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .L18 cmpb $0, 56(%r14) je .L11 movzbl 67(%r14), %esi jmp .L12 .cfi_endproc .LFE3682: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3708: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3708: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long 0 .long 1062207488 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "test.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI0_0: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI0_1: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI0_2: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $72, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 leaq 8(%rsp), %rdi movq %rsp, %rsi callq hipMemGetInfo testl %eax, %eax jne .LBB0_7 # %bb.1: leaq 8(%rsp), %rbx movq %rsp, %r14 jmp .LBB0_2 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_2 Depth=1 movq %r12, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) .LBB0_6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB0_2 Depth=1 movsbl %al, %esi movq %r15, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %edi callq sleep movq %rbx, %rdi movq %r14, %rsi callq hipMemGetInfo testl %eax, %eax jne .LBB0_7 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd 8(%rsp), %xmm0 # xmm0 = mem[0],zero movapd .LCPI0_0(%rip), %xmm1 # xmm1 = [1127219200,1160773632,0,0] unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movapd .LCPI0_1(%rip), %xmm2 # xmm2 = [4.503599627370496E+15,1.9342813113834067E+25] subpd %xmm2, %xmm0 movapd %xmm0, %xmm3 unpckhpd %xmm0, %xmm3 # xmm3 = xmm3[1],xmm0[1] addsd %xmm0, %xmm3 movapd %xmm3, 32(%rsp) # 16-byte Spill movsd (%rsp), %xmm0 # 
xmm0 = mem[0],zero unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] subpd %xmm2, %xmm0 movapd %xmm0, %xmm1 unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1] addsd %xmm0, %xmm1 movapd %xmm1, 48(%rsp) # 16-byte Spill subsd %xmm3, %xmm1 movapd %xmm1, 16(%rsp) # 16-byte Spill movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $25, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movsd .LCPI0_2(%rip), %xmm0 # xmm0 = mem[0],zero movapd 16(%rsp), %xmm1 # 16-byte Reload mulsd %xmm0, %xmm1 mulsd %xmm0, %xmm1 movapd %xmm1, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.2, %esi movl $9, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movapd 32(%rsp), %xmm0 # 16-byte Reload movsd .LCPI0_2(%rip), %xmm1 # xmm1 = mem[0],zero mulsd %xmm1, %xmm0 mulsd %xmm1, %xmm0 movq %r15, %rdi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.3, %esi movl $13, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movapd 48(%rsp), %xmm0 # 16-byte Reload movsd .LCPI0_2(%rip), %xmm1 # xmm1 = mem[0],zero mulsd %xmm1, %xmm0 mulsd %xmm1, %xmm0 movq %r15, %rdi callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r15 movl $.L.str.4, %esi movl $3, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r15), %rax movq -24(%rax), %rax movq 240(%r15,%rax), %r12 testq %r12, %r12 je .LBB0_8 # %bb.3: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB0_2 Depth=1 cmpb $0, 56(%r12) je .LBB0_5 # %bb.4: # in Loop: Header=BB0_2 Depth=1 movzbl 67(%r12), %eax jmp .LBB0_6 .LBB0_7: # %._crit_edge movl $_ZSt4cout, %edi movl $.L.str, %esi movl %eax, %ebx callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %r14 movl %ebx, %edi callq hipGetErrorString movq %r14, %rdi movq %rax, %rsi callq 
_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $1, %edi callq exit .LBB0_8: callq _ZSt16__throw_bad_castv .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error: hipMemGetInfo fails, " .size .L.str, 29 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "GPU memory usage: used = " .size .L.str.1, 26 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz ", free = " .size .L.str.2, 10 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " MB, total = " .size .L.str.3, 14 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " MB" .size .L.str.4, 4 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
code for sm_80 Function : _Z19STREAM_Triad_doublePdS_S_dm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fc80003f06070 */ /*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x184], PT, P0 ; /* 0x00006100ff007a0c */ /* 0x000fda0003f06100 */ /*0060*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0070*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */ /* 0x000fe200078e00ff */ /*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0090*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */ /* 0x000fe400078e00ff */ /*00a0*/ IMAD.SHL.U32 R8, R0.reuse, 0x8, RZ ; /* 0x0000000800087824 */ /* 0x041fe200078e00ff */ /*00b0*/ SHF.L.U64.HI R9, R0, 0x3, R11 ; /* 0x0000000300097819 */ /* 0x000fc8000001020b */ /*00c0*/ IADD3 R4, P1, R8.reuse, c[0x0][0x160], RZ ; /* 0x0000580008047a10 */ /* 0x040fe40007f3e0ff */ /*00d0*/ IADD3 R6, P0, R8, c[0x0][0x168], RZ ; /* 0x00005a0008067a10 */ /* 0x000fe40007f1e0ff */ /*00e0*/ IADD3.X R5, R9.reuse, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590009057a10 */ /* 0x040fe40000ffe4ff */ /*00f0*/ IADD3.X R7, R9, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0009077a10 */ /* 0x000fc800007fe4ff */ /*0100*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*0110*/ LDG.E.64 R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1b00 */ /*0120*/ IADD3 R8, P0, R8, c[0x0][0x170], RZ ; /* 0x00005c0008087a10 */ /* 0x000fe20007f1e0ff */ /*0130*/ IMAD R13, R10, c[0x0][0xc], RZ ; /* 
0x000003000a0d7a24 */ /* 0x000fc600078e02ff */ /*0140*/ IADD3.X R9, R9, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d0009097a10 */ /* 0x000fe400007fe4ff */ /*0150*/ IADD3 R0, P0, R13, R0, RZ ; /* 0x000000000d007210 */ /* 0x000fca0007f1e0ff */ /*0160*/ IMAD.X R11, RZ, RZ, R11, P0 ; /* 0x000000ffff0b7224 */ /* 0x000fe200000e060b */ /*0170*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fc80003f06070 */ /*0180*/ ISETP.GE.U32.AND.EX P0, PT, R11, c[0x0][0x184], PT, P0 ; /* 0x000061000b007a0c */ /* 0x000fe20003f06100 */ /*0190*/ DFMA R2, R2, c[0x0][0x178], R4 ; /* 0x00005e0002027a2b */ /* 0x004e0e0000000004 */ /*01a0*/ STG.E.64 [R8.64], R2 ; /* 0x0000000208007986 */ /* 0x0011ea000c101b04 */ /*01b0*/ @!P0 BRA 0xa0 ; /* 0xfffffee000008947 */ /* 0x000fea000383ffff */ /*01c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
.file "tmpxft_00034e3b_00000000-6_STREAM_Triad_double.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm .type _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm, @function _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm: .LFB2051: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %rcx, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z19STREAM_Triad_doublePdS_S_dm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm, .-_Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm .globl _Z19STREAM_Triad_doublePdS_S_dm .type 
_Z19STREAM_Triad_doublePdS_S_dm, @function _Z19STREAM_Triad_doublePdS_S_dm: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z19STREAM_Triad_doublePdS_S_dm, .-_Z19STREAM_Triad_doublePdS_S_dm .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z19STREAM_Triad_doublePdS_S_dm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z19STREAM_Triad_doublePdS_S_dm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z19STREAM_Triad_doublePdS_S_dm .globl _Z19STREAM_Triad_doublePdS_S_dm .p2align 8 .type _Z19STREAM_Triad_doublePdS_S_dm,@function _Z19STREAM_Triad_doublePdS_S_dm: s_clause 0x1 s_load_b32 s6, s[0:1], 0x34 s_load_b64 s[2:3], s[0:1], 0x20 s_add_u32 s4, s0, 40 s_addc_u32 s5, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s12, s6, 0xffff s_mov_b32 s6, exec_lo v_mad_u64_u32 v[1:2], null, s15, s12, v[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[2:3], v[1:2] s_cbranch_execz .LBB0_3 s_load_b32 s14, s[4:5], 0x0 s_load_b256 s[4:11], s[0:1], 0x0 v_lshlrev_b64 v[3:4], 3, v[1:2] s_mov_b32 s13, 0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) s_mov_b32 s1, s13 s_waitcnt lgkmcnt(0) s_mul_i32 s12, s14, s12 s_lshl_b64 s[14:15], s[12:13], 3 .p2align 6 .LBB0_2: v_add_co_u32 v5, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v7, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo v_add_co_u32 v1, vcc_lo, v1, s12 global_load_b64 v[5:6], v[5:6], off global_load_b64 v[7:8], v[7:8], off v_add_co_ci_u32_e32 v2, vcc_lo, s13, v2, vcc_lo s_waitcnt vmcnt(0) v_fma_f64 v[5:6], v[7:8], s[10:11], v[5:6] v_add_co_u32 v7, vcc_lo, s8, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s9, v4, vcc_lo v_cmp_le_u64_e32 vcc_lo, s[2:3], v[1:2] v_add_co_u32 v3, s0, v3, s14 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v4, s0, s15, v4, s0 s_or_b32 s1, vcc_lo, s1 global_store_b64 v[7:8], v[5:6], off s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z19STREAM_Triad_doublePdS_S_dm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 
.amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z19STREAM_Triad_doublePdS_S_dm, .Lfunc_end0-_Z19STREAM_Triad_doublePdS_S_dm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - 
.offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z19STREAM_Triad_doublePdS_S_dm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z19STREAM_Triad_doublePdS_S_dm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
.text .file "STREAM_Triad_double.hip" .globl _Z34__device_stub__STREAM_Triad_doublePdS_S_dm # -- Begin function _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .p2align 4, 0x90 .type _Z34__device_stub__STREAM_Triad_doublePdS_S_dm,@function _Z34__device_stub__STREAM_Triad_doublePdS_S_dm: # @_Z34__device_stub__STREAM_Triad_doublePdS_S_dm .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movsd %xmm0, 64(%rsp) movq %rcx, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z19STREAM_Triad_doublePdS_S_dm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z34__device_stub__STREAM_Triad_doublePdS_S_dm, .Lfunc_end0-_Z34__device_stub__STREAM_Triad_doublePdS_S_dm .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z19STREAM_Triad_doublePdS_S_dm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size 
__hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z19STREAM_Triad_doublePdS_S_dm,@object # @_Z19STREAM_Triad_doublePdS_S_dm .section .rodata,"a",@progbits .globl _Z19STREAM_Triad_doublePdS_S_dm .p2align 3, 0x0 _Z19STREAM_Triad_doublePdS_S_dm: .quad _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .size _Z19STREAM_Triad_doublePdS_S_dm, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z19STREAM_Triad_doublePdS_S_dm" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z19STREAM_Triad_doublePdS_S_dm .addrsig_sym __hip_fatbin 
.addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z19STREAM_Triad_doublePdS_S_dm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fc80003f06070 */ /*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x184], PT, P0 ; /* 0x00006100ff007a0c */ /* 0x000fda0003f06100 */ /*0060*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0070*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */ /* 0x000fe200078e00ff */ /*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0090*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */ /* 0x000fe400078e00ff */ /*00a0*/ IMAD.SHL.U32 R8, R0.reuse, 0x8, RZ ; /* 0x0000000800087824 */ /* 0x041fe200078e00ff */ /*00b0*/ SHF.L.U64.HI R9, R0, 0x3, R11 ; /* 0x0000000300097819 */ /* 0x000fc8000001020b */ /*00c0*/ IADD3 R4, P1, R8.reuse, c[0x0][0x160], RZ ; /* 0x0000580008047a10 */ /* 0x040fe40007f3e0ff */ /*00d0*/ IADD3 R6, P0, R8, c[0x0][0x168], RZ ; /* 0x00005a0008067a10 */ /* 0x000fe40007f1e0ff */ /*00e0*/ IADD3.X R5, R9.reuse, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590009057a10 */ /* 0x040fe40000ffe4ff */ /*00f0*/ IADD3.X R7, R9, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0009077a10 */ /* 0x000fc800007fe4ff */ /*0100*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*0110*/ LDG.E.64 R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1b00 */ /*0120*/ IADD3 R8, P0, R8, c[0x0][0x170], RZ ; /* 0x00005c0008087a10 */ /* 0x000fe20007f1e0ff */ /*0130*/ IMAD R13, R10, c[0x0][0xc], RZ ; /* 
0x000003000a0d7a24 */ /* 0x000fc600078e02ff */ /*0140*/ IADD3.X R9, R9, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d0009097a10 */ /* 0x000fe400007fe4ff */ /*0150*/ IADD3 R0, P0, R13, R0, RZ ; /* 0x000000000d007210 */ /* 0x000fca0007f1e0ff */ /*0160*/ IMAD.X R11, RZ, RZ, R11, P0 ; /* 0x000000ffff0b7224 */ /* 0x000fe200000e060b */ /*0170*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fc80003f06070 */ /*0180*/ ISETP.GE.U32.AND.EX P0, PT, R11, c[0x0][0x184], PT, P0 ; /* 0x000061000b007a0c */ /* 0x000fe20003f06100 */ /*0190*/ DFMA R2, R2, c[0x0][0x178], R4 ; /* 0x00005e0002027a2b */ /* 0x004e0e0000000004 */ /*01a0*/ STG.E.64 [R8.64], R2 ; /* 0x0000000208007986 */ /* 0x0011ea000c101b04 */ /*01b0*/ @!P0 BRA 0xa0 ; /* 0xfffffee000008947 */ /* 0x000fea000383ffff */ /*01c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z19STREAM_Triad_doublePdS_S_dm .globl _Z19STREAM_Triad_doublePdS_S_dm .p2align 8 .type _Z19STREAM_Triad_doublePdS_S_dm,@function _Z19STREAM_Triad_doublePdS_S_dm: s_clause 0x1 s_load_b32 s6, s[0:1], 0x34 s_load_b64 s[2:3], s[0:1], 0x20 s_add_u32 s4, s0, 40 s_addc_u32 s5, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s12, s6, 0xffff s_mov_b32 s6, exec_lo v_mad_u64_u32 v[1:2], null, s15, s12, v[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u64_e64 s[2:3], v[1:2] s_cbranch_execz .LBB0_3 s_load_b32 s14, s[4:5], 0x0 s_load_b256 s[4:11], s[0:1], 0x0 v_lshlrev_b64 v[3:4], 3, v[1:2] s_mov_b32 s13, 0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) s_mov_b32 s1, s13 s_waitcnt lgkmcnt(0) s_mul_i32 s12, s14, s12 s_lshl_b64 s[14:15], s[12:13], 3 .p2align 6 .LBB0_2: v_add_co_u32 v5, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v7, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo v_add_co_u32 v1, vcc_lo, v1, s12 global_load_b64 v[5:6], v[5:6], off global_load_b64 v[7:8], v[7:8], off v_add_co_ci_u32_e32 v2, vcc_lo, s13, v2, vcc_lo s_waitcnt vmcnt(0) v_fma_f64 v[5:6], v[7:8], s[10:11], v[5:6] v_add_co_u32 v7, vcc_lo, s8, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s9, v4, vcc_lo v_cmp_le_u64_e32 vcc_lo, s[2:3], v[1:2] v_add_co_u32 v3, s0, v3, s14 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v4, s0, s15, v4, s0 s_or_b32 s1, vcc_lo, s1 global_store_b64 v[7:8], v[5:6], off s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z19STREAM_Triad_doublePdS_S_dm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 
.amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z19STREAM_Triad_doublePdS_S_dm, .Lfunc_end0-_Z19STREAM_Triad_doublePdS_S_dm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - 
.offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z19STREAM_Triad_doublePdS_S_dm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z19STREAM_Triad_doublePdS_S_dm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00034e3b_00000000-6_STREAM_Triad_double.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm .type _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm, @function _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm: .LFB2051: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movsd %xmm0, 16(%rsp) movq %rcx, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z19STREAM_Triad_doublePdS_S_dm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm, .-_Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm .globl _Z19STREAM_Triad_doublePdS_S_dm .type 
_Z19STREAM_Triad_doublePdS_S_dm, @function _Z19STREAM_Triad_doublePdS_S_dm: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z19STREAM_Triad_doublePdS_S_dmPdS_S_dm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z19STREAM_Triad_doublePdS_S_dm, .-_Z19STREAM_Triad_doublePdS_S_dm .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z19STREAM_Triad_doublePdS_S_dm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z19STREAM_Triad_doublePdS_S_dm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "STREAM_Triad_double.hip" .globl _Z34__device_stub__STREAM_Triad_doublePdS_S_dm # -- Begin function _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .p2align 4, 0x90 .type _Z34__device_stub__STREAM_Triad_doublePdS_S_dm,@function _Z34__device_stub__STREAM_Triad_doublePdS_S_dm: # @_Z34__device_stub__STREAM_Triad_doublePdS_S_dm .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movsd %xmm0, 64(%rsp) movq %rcx, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z19STREAM_Triad_doublePdS_S_dm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z34__device_stub__STREAM_Triad_doublePdS_S_dm, .Lfunc_end0-_Z34__device_stub__STREAM_Triad_doublePdS_S_dm .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z19STREAM_Triad_doublePdS_S_dm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size 
__hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z19STREAM_Triad_doublePdS_S_dm,@object # @_Z19STREAM_Triad_doublePdS_S_dm .section .rodata,"a",@progbits .globl _Z19STREAM_Triad_doublePdS_S_dm .p2align 3, 0x0 _Z19STREAM_Triad_doublePdS_S_dm: .quad _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .size _Z19STREAM_Triad_doublePdS_S_dm, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z19STREAM_Triad_doublePdS_S_dm" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z34__device_stub__STREAM_Triad_doublePdS_S_dm .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z19STREAM_Triad_doublePdS_S_dm .addrsig_sym __hip_fatbin 
.addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/*
    KAM PUI SO (ANTHONY)
    CS 510 GPU Homework 1

    The Cross-Over Point

    CUDA really shines when given problems involving lots of data, but for
    small problems, using CUDA can be slower than a pure CPU solution. Since
    it can be difficult to get a feel for how large a problem needs to be
    before using the GPU becomes useful, this lab encourages you to find the
    "crossover point" for vector addition: how large do the vectors need to
    be for the speed of GPU vector addition to eclipse the speed of CPU
    vector addition?

    Times the CPU and GPU vector-addition functions on vectors of increasing
    magnitude so the crossover point can be read off the output.
*/

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>   /* malloc/free, exit */

const int SIZE = 2;
const int MAX = 214783647 ;   /* unused; kept for source compatibility */

/* Abort with a readable message if a CUDA runtime call fails.  Without
 * this, a failed cudaMalloc/cudaMemcpy silently produces garbage timings. */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/* The old-fashioned CPU-only way to add two vectors.
 * result[i] = a[i] + b[i] for i in [0, n). */
void add_vectors_host(int *result, int *a, int *b, int n) {
    for (int i = 0; i < n; i++)
        result[i] = a[i] + b[i];
}

/* The kernel that will execute on the GPU.
 * One thread per element; the bounds check protects the tail block when
 * n is not a multiple of the block size. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our vector addition kernel. The steps are:
 *   1. Allocate memory on the device to hold our vectors
 *   2. Copy the vectors to device memory
 *   3. Execute the kernel
 *   4. Retrieve the result vector by copying it back to the host
 *   5. Free memory on the device
 * Every runtime call is checked; a failure aborts the program. */
void add_vectors_dev(int *result, int *a, int *b, int n) {
    int *a_dev, *b_dev, *result_dev;
    /* size_t math so very large n cannot overflow the byte count */
    size_t bytes = (size_t)n * sizeof(int);

    /* Step 1: Allocate memory.  cudaMalloc returns a status, not a
     * pointer, so we pass the address of our device pointer variables. */
    CUDA_CHECK(cudaMalloc((void **) &a_dev, bytes));
    CUDA_CHECK(cudaMalloc((void **) &b_dev, bytes));
    CUDA_CHECK(cudaMalloc((void **) &result_dev, bytes));

    /* Step 2: Copy the input vectors to the device */
    CUDA_CHECK(cudaMemcpy(a_dev, a, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(b_dev, b, bytes, cudaMemcpyHostToDevice));

    /* Step 3: Invoke the kernel.  Enough 512-thread blocks to cover all
     * n elements; 512 is arbitrary but known to be supported. */
    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
    CUDA_CHECK(cudaGetLastError());   /* catch bad launch configuration */

    /* Step 4: Retrieve the results (cudaMemcpy synchronizes with the
     * kernel on the default stream) */
    CUDA_CHECK(cudaMemcpy(result, result_dev, bytes, cudaMemcpyDeviceToHost));

    /* Step 5: Free device memory */
    CUDA_CHECK(cudaFree(a_dev));
    CUDA_CHECK(cudaFree(b_dev));
    CUDA_CHECK(cudaFree(result_dev));
}

/* Print n ints separated by spaces, then a newline (debug helper). */
void print_vector(int *array, int n) {
    int i;
    for (i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

/* Print the elapsed time between two gettimeofday() samples, in
 * microseconds. */
void print_time(timeval start, timeval end) {
    printf("Time = %ld us\n",
           ((end.tv_sec * 1000000 + end.tv_usec) -
            (start.tv_sec * 1000000 + start.tv_usec)));
}

/* Run one CPU-vs-GPU timing comparison on vectors of length `size`.
 * Returns 0 on success, -1 on invalid size or allocation failure. */
int vector_add_size(int size) {
    int n = size;        /* Length of the arrays */

    if (n <= 0)          /* guard degenerate sizes before malloc(n * 4) */
        return -1;

    int *a = (int *) malloc(n * sizeof(int));
    int *b = (int *) malloc(n * sizeof(int));
    int *host_result = (int *) malloc(n * sizeof(int));
    int *device_result = (int *) malloc(n * sizeof(int));

    /* verify malloc; free whatever did succeed (free(NULL) is a no-op) */
    if (!(a && b && host_result && device_result)) {
        printf("out of memory\n");
        free(a);
        free(b);
        free(host_result);
        free(device_result);
        return(-1);
    }

    struct timeval start, end;

    /* Initialize every element.  (The original started this loop at
     * n % 7, leaving the first few elements uninitialized even though
     * both adders read all n of them — undefined behavior.) */
    for (int i = 0; i < n; ++i) {
        a[i] = i;
        b[i] = SIZE + i;
    }

    /* print answers: */
    printf("The CPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_host(host_result, a, b, n);
    gettimeofday(&end, NULL);
    print_time(start, end);

    printf("The GPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_dev(device_result, a, b, n);
    gettimeofday(&end, NULL);
    print_time(start, end);

    /* free memory */
    free(a);
    free(b);
    free(host_result);
    free(device_result);
    return 0;
}

/* Sweep vector sizes from SIZE to 30,000,000 in steps of 50,000 and time
 * each one, so the CPU/GPU crossover point can be read off the output. */
int main(void) {
    int size = SIZE;
    int min = size;
    int max = 30000000;
    int inc = 50000;

    for (int i = min; i <= max; i += inc) {
        printf("\nsize = %d\n", i);
        vector_add_size(i);
    }
    return 0;
}
code for sm_80 Function : _Z18add_vectors_kernelPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fe200078e0207 */ /*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/*
    KAM PUI SO (ANTHONY)
    CS 510 GPU Homework 1

    The Cross-Over Point

    For small inputs a plain CPU loop can beat the GPU, because the fixed
    cost of allocating device buffers and moving data across the bus
    dominates.  This program times CPU and GPU vector addition over a range
    of vector lengths so the crossover point can be located empirically.
*/

#include <sys/time.h>
#include <time.h>
#include <stdio.h>

const int SIZE = 2;
const int MAX = 214783647 ;

/* Plain sequential vector addition on the host: result = a + b. */
void add_vectors_host(int *result, int *a, int *b, int n) {
    int i = 0;
    while (i < n) {
        result[i] = a[i] + b[i];
        ++i;
    }
}

/* GPU kernel: one thread per element.  Threads past the end of the
 * vectors (launched only to round out the last block) do nothing. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;
    result[gid] = a[gid] + b[gid];
}

/* Full GPU round trip for one addition:
 * allocate device buffers, upload inputs, launch the kernel, download the
 * result, and release the device buffers again. */
void add_vectors_dev(int *result, int *a, int *b, int n) {
    int *d_a, *d_b, *d_out;
    const size_t nbytes = sizeof(int) * n;

    /* cudaMalloc reports status via its return value, so it receives the
     * address of each device-pointer variable to fill in. */
    cudaMalloc((void **) &d_a, nbytes);
    cudaMalloc((void **) &d_b, nbytes);
    cudaMalloc((void **) &d_out, nbytes);

    cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, nbytes, cudaMemcpyHostToDevice);

    /* 512 threads per block (known-supported size); enough blocks to
     * cover all n elements, rounding up. */
    const int threads = 512;
    const int blocks = (n + threads - 1) / threads;
    add_vectors_kernel<<<blocks, threads>>>(d_out, d_a, d_b, n);

    cudaMemcpy(result, d_out, nbytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
}

/* Debug helper: print n ints separated by spaces, then a newline. */
void print_vector(int *array, int n) {
    for (int i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

/* Report the elapsed microseconds between two gettimeofday() samples. */
void print_time(timeval start, timeval end) {
    long begin_us = start.tv_sec * 1000000 + start.tv_usec;
    long finish_us = end.tv_sec * 1000000 + end.tv_usec;
    printf("Time = %ld us\n", finish_us - begin_us);
}

/* Time one CPU run and one GPU run on vectors of length `size`.
 * Returns 0 on success, -1 if any host allocation fails. */
int vector_add_size(int size) {
    int n = size;
    int start_idx = n % 7;

    int *a = (int *) malloc(n * sizeof(int));
    int *b = (int *) malloc(n * sizeof(int));
    int *host_result = (int *) malloc(n * sizeof(int));
    int *device_result = (int *) malloc(n * sizeof(int));

    if (a == NULL || b == NULL || host_result == NULL || device_result == NULL) {
        printf("out of memory\n");
        return(-1);
    }

    struct timeval t0, t1;

    /* Fill the input vectors (starting at n % 7, as in the original). */
    for (int i = start_idx; i < n; ++i) {
        a[i] = i;
        b[i] = SIZE + i;
    }

    printf("The CPU's answer: ");
    gettimeofday(&t0, NULL);
    add_vectors_host(host_result, a, b, n);
    gettimeofday(&t1, NULL);
    print_time(t0, t1);

    printf("The GPU's answer: ");
    gettimeofday(&t0, NULL);
    add_vectors_dev(device_result, a, b, n);
    gettimeofday(&t1, NULL);
    print_time(t0, t1);

    free(a);
    free(b);
    free(host_result);
    free(device_result);
    return 0;
}

/* Sweep sizes from SIZE up to 30,000,000 in increments of 50,000,
 * timing CPU and GPU addition at each size. */
int main(void) {
    int size = SIZE;
    int min = size;
    int max = 30000000;
    int inc = 50000;

    for (int i = min; i <= max; i += inc) {
        printf("\nsize = %d\n", i);
        vector_add_size(i);
    }
    return 0;
}
.file "tmpxft_000173c9_00000000-6_hw1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2065: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2065: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z16add_vectors_hostPiS_S_i .type _Z16add_vectors_hostPiS_S_i, @function _Z16add_vectors_hostPiS_S_i: .LFB2057: .cfi_startproc endbr64 testl %ecx, %ecx jle .L3 movslq %ecx, %rcx leaq 0(,%rcx,4), %r8 movl $0, %eax .L5: movl (%rdx,%rax), %ecx addl (%rsi,%rax), %ecx movl %ecx, (%rdi,%rax) addq $4, %rax cmpq %r8, %rax jne .L5 .L3: ret .cfi_endproc .LFE2057: .size _Z16add_vectors_hostPiS_S_i, .-_Z16add_vectors_hostPiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d " .LC1: .string "\n" .text .globl _Z12print_vectorPii .type _Z12print_vectorPii, @function _Z12print_vectorPii: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 testl %esi, %esi jle .L8 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r12 leaq .LC0(%rip), %rbp .L9: movl (%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L9 .L8: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z12print_vectorPii, .-_Z12print_vectorPii .section .rodata.str1.1 .LC2: .string "Time = %ld us\n" .text .globl _Z10print_time7timevalS_ .type _Z10print_time7timevalS_, @function _Z10print_time7timevalS_: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 imulq $1000000, %rdx, %rdx addq %rcx, %rdx imulq 
$1000000, %rdi, %rdi addq %rsi, %rdi subq %rdi, %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z10print_time7timevalS_, .-_Z10print_time7timevalS_ .globl _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i .type _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i, @function _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 136(%rsp), %rax subq %fs:40, %rax jne .L19 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18add_vectors_kernelPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i, .-_Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i .globl _Z18add_vectors_kernelPiS_S_i .type _Z18add_vectors_kernelPiS_S_i, @function _Z18add_vectors_kernelPiS_S_i: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size 
_Z18add_vectors_kernelPiS_S_i, .-_Z18add_vectors_kernelPiS_S_i .globl _Z15add_vectors_devPiS_S_i .type _Z15add_vectors_devPiS_S_i, @function _Z15add_vectors_devPiS_S_i: .LFB2058: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %rdi, %r12 movq %rsi, %r14 movq %rdx, %r13 movl %ecx, %ebp movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %ecx, %rbx salq $2, %rbx leaq 8(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leal 1022(%rbp), %eax movl %ebp, %edx addl $511, %edx cmovns %edx, %eax sarl $9, %eax movl %eax, 32(%rsp) movl $1, 36(%rsp) movl $512, 44(%rsp) movl $1, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L23: movl $2, %ecx movq %rbx, %rdx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L27 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state movl %ebp, %ecx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq 24(%rsp), %rdi call _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i jmp .L23 .L27: call __stack_chk_fail@PLT 
.cfi_endproc .LFE2058: .size _Z15add_vectors_devPiS_S_i, .-_Z15add_vectors_devPiS_S_i .section .rodata.str1.1 .LC3: .string "out of memory\n" .LC4: .string "The CPU's answer: " .LC5: .string "The GPU's answer: " .text .globl _Z15vector_add_sizei .type _Z15vector_add_sizei, @function _Z15vector_add_sizei: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movl %edi, %ebx movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %edi, %r13 imulq $-1840700269, %r13, %r15 shrq $32, %r15 addl %edi, %r15d sarl $2, %r15d movl %edi, %eax sarl $31, %eax subl %eax, %r15d leal 0(,%r15,8), %eax subl %r15d, %eax movl %edi, %r15d subl %eax, %r15d salq $2, %r13 movq %r13, %rdi call malloc@PLT movq %rax, %r12 movq %r13, %rdi call malloc@PLT movq %rax, %rbp movq %r13, %rdi call malloc@PLT movq %rax, %r14 movq %r13, %rdi call malloc@PLT testq %r12, %r12 je .L29 movq %rax, %r13 testq %rbp, %rbp je .L29 testq %r14, %r14 je .L29 testq %rax, %rax je .L29 movslq %r15d, %rax cmpl %r15d, %ebx jle .L31 .L33: movl %eax, (%r12,%rax,4) leal 2(%rax), %edx movl %edx, 0(%rbp,%rax,4) addq $1, %rax cmpl %eax, %ebx jg .L33 .L31: leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 16(%rsp), %r15 movl $0, %esi movq %r15, %rdi call gettimeofday@PLT movl %ebx, %ecx movq %rbp, %rdx movq %r12, %rsi movq %r14, %rdi call _Z16add_vectors_hostPiS_S_i leaq 32(%rsp), %rax movl $0, %esi movq %rax, 8(%rsp) movq %rax, %rdi call gettimeofday@PLT movq 32(%rsp), %rdx movq 40(%rsp), %rcx movq 16(%rsp), %rdi movq 24(%rsp), %rsi call _Z10print_time7timevalS_ leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %esi movq %r15, 
%rdi call gettimeofday@PLT movl %ebx, %ecx movq %rbp, %rdx movq %r12, %rsi movq %r13, %rdi call _Z15add_vectors_devPiS_S_i movl $0, %esi movq 8(%rsp), %rdi call gettimeofday@PLT movq 32(%rsp), %rdx movq 40(%rsp), %rcx movq 16(%rsp), %rdi movq 24(%rsp), %rsi call _Z10print_time7timevalS_ movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r14, %rdi call free@PLT movq %r13, %rdi call free@PLT movl $0, %eax .L28: movq 56(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L28 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z15vector_add_sizei, .-_Z15vector_add_sizei .section .rodata.str1.1 .LC6: .string "\nsize = %d\n" .text .globl main .type main, @function main: .LFB2062: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl $2, %ebx leaq .LC6(%rip), %rbp .L40: movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebx, %edi call _Z15vector_add_sizei addl $50000, %ebx cmpl $30000002, %ebx jne .L40 movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z18add_vectors_kernelPiS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 
24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z18add_vectors_kernelPiS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/*
    KAM PUI SO (ANTHONY)
    CS 510 GPU Homework 1

    The Cross-Over Point

    CUDA really shines when given problems involving lots of data, but for
    small problems, using CUDA can be slower than a pure CPU solution.
    Since it can be difficult to get a feel for how large a problem needs
    to be before using the GPU becomes useful, this lab encourages you to
    find the "crossover point" for vector addition.  Specifically: how
    large do the vectors need to be for the speed of GPU vector addition
    to eclipse the speed of CPU vector addition?

    Modify the vector_addition.cu example to time how long it takes the
    CPU and GPU vector addition functions to operate on vectors of
    different magnitudes.  Find (roughly) what magnitude constitutes the
    cross-over point for this problem on your system.
*/

#include <sys/time.h>
#include <time.h>
#include <stdio.h>

const int SIZE = 2;             /* value offset for b[], and the sweep's starting size */
const int MAX = 214783647 ;     /* NOTE(review): unused — only referenced from commented-out loop below */

/* The old-fashioned CPU-only way to add two vectors:
 * result[i] = a[i] + b[i] for every i in [0, n). */
void add_vectors_host(int *result, int *a, int *b, int n) {
    for (int i=0; i<n; i++)
        result[i] = a[i] + b[i];
}

/* The kernel that will execute on the GPU.  One thread handles one
 * element; idx is the flat global thread index. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // If we have more threads than the magnitude of our vector, we need to
    // make sure that the excess threads don't try to save results into
    // unallocated memory.
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our vector addition kernel.  The steps of
 * the process are:
 *   1. Allocate memory on the device to hold our vectors
 *   2. Copy the vectors to device memory
 *   3. Execute the kernel
 *   4. Retrieve the result vector from the device by copying it to the host
 *   5. Free memory on the device
 * NOTE(review): none of the CUDA runtime calls below are error-checked;
 * a failure would go unnoticed and the timings would be meaningless. */
void add_vectors_dev(int *result, int *a, int *b, int n) {
    // Step 1: Allocate memory
    int *a_dev, *b_dev, *result_dev;

    // Since cudaMalloc does not return a pointer like C's traditional malloc
    // (it returns a success status instead), we provide as it's first argument
    // the address of our device pointer variable so that it can change the
    // value of our pointer to the correct device address.
    cudaMalloc((void **) &a_dev, sizeof(int) * n);
    cudaMalloc((void **) &b_dev, sizeof(int) * n);
    cudaMalloc((void **) &result_dev, sizeof(int) * n);

    // Step 2: Copy the input vectors to the device
    cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice);

    // Step 3: Invoke the kernel
    // We allocate enough blocks (each 512 threads long) in the grid to
    // accommodate all `n` elements in the vectors. The 512 long block size
    // is somewhat arbitrary, but with the constraint that we know the
    // hardware will support blocks of that size.
    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);

    // Step 4: Retrieve the results (this copy synchronizes with the
    // kernel, since both use the default stream)
    cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);

    // Step 5: Free device memory
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(result_dev);
}

/* Debug helper: print n ints separated by spaces, then a newline. */
void print_vector(int *array, int n) {
    int i;
    for (i=0; i<n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

// This function prints out the difference in time between two
// gettimeofday() samples, in microseconds.
void print_time(timeval start, timeval end) {
    printf("Time = %ld us\n",
        ((end.tv_sec * 1000000 + end.tv_usec) -
         (start.tv_sec * 1000000 + start.tv_usec )));
}

/* Run one CPU-vs-GPU timing comparison on vectors of length `size`.
 * Returns 0 on success, -1 on allocation failure.
 * NOTE(review): on the out-of-memory path the allocations that did
 * succeed are leaked. */
int vector_add_size(int size) {
    int n = size;       // Length of the arrays
    int rand = n % 7;   // start index for initialization (see NOTE below)

    // int a[] = {0, 1, 2, 3, 4};
    // int b[] = {5, 6, 7, 8, 9};
    // int host_result[5];
    // int device_result[5];
    int *a = (int *) malloc(n * sizeof(int));
    int *b = (int *) malloc(n * sizeof(int));
    int *host_result = (int *) malloc(n * sizeof(int));
    int *device_result = (int *) malloc(n * sizeof(int));

    // verify malloc
    if (!(a && b && host_result && device_result)) {
        printf("out of memory\n");
        return(-1);
    }

    struct timeval start, end;

    // create variable size matrix
    // NOTE(review): this loop starts at `rand` (= n % 7), so elements
    // [0, rand) of a and b are never initialized even though both adders
    // read all n elements — looks like a bug; confirm intent.
    for (int i = rand; i < n; ++i) {
        a[i] = i;
        b[i] = SIZE + i;
    }

    /*
    int deviceCount;
    int device;
    // show cuda capability
    cudaGetDeviceCount(&deviceCount);
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %d has compute capability %d.%d.\n",
            device, deviceProp.major, deviceProp.minor);
    }
    */

    // print answers:
    printf("The CPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_host(host_result, a, b, n);
    gettimeofday(&end, NULL);
    // print_vector(host_result, n);
    print_time(start, end);

    printf("The GPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_dev(device_result, a, b, n);
    gettimeofday(&end, NULL);
    // print_vector(device_result, n);
    print_time(start, end);

    // free memory
    free(a);
    free(b);
    free(host_result);
    free(device_result);
    return 0;
}

// main function: sweep vector sizes from SIZE (= 2) to 30,000,000 in
// steps of 50,000, timing CPU and GPU addition at each size.
int main(void) {
    int size = SIZE;
    int min = size;
    int max = 30000000;
    int inc = 50000;

    // for (int i = size; i < MAX; i*=size)
    for (int i = min; i <= max; i+=inc) {
        printf("\nsize = %d\n", i);
        vector_add_size(i);
    }
    return 0;
}
/* KAM PUI SO (ANTHONY) CS 510 GPU Homework 1 The Cross-Over Point CUDA really shines when given problems involving lots of data, but for small problems, using CUDA can be slower than a pure CPU solution. Since it can be difficult to get a feel for how large a problem needs to be before using the GPU becomes useful, this lab encourages you to find the "crossover point" for vector addition. Specifically: how large do the vectors need to be for the speed of GPU vector addition to eclipse the speed of CPU vector addition? Modify the vector_addition.cu example to time how long it takes the CPU and GPU vector addition functions to operate on vectors of different magnitudes. Find (roughly) what magnitude constitutes the cross-over point for this problem on your system. */ #include <hip/hip_runtime.h> #include <sys/time.h> #include <time.h> #include <stdio.h> const int SIZE = 2; const int MAX = 214783647 ; /* The old-fashioned CPU-only way to add two vectors */ void add_vectors_host(int *result, int *a, int *b, int n) { for (int i=0; i<n; i++) result[i] = a[i] + b[i]; } /* The kernel that will execute on the GPU */ __global__ void add_vectors_kernel(int *result, int *a, int *b, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; // If we have more threads than the magnitude of our vector, we need to // make sure that the excess threads don't try to save results into // unallocated memory. if (idx < n) result[idx] = a[idx] + b[idx]; } /* This function encapsulates the process of creating and tearing down the * environment used to execute our vector addition kernel. The steps of the * process are: * 1. Allocate memory on the device to hold our vectors * 2. Copy the vectors to device memory * 3. Execute the kernel * 4. Retrieve the result vector from the device by copying it to the host * 5. 
Free memory on the device */ void add_vectors_dev(int *result, int *a, int *b, int n) { // Step 1: Allocate memory int *a_dev, *b_dev, *result_dev; // Since cudaMalloc does not return a pointer like C's traditional malloc // (it returns a success status instead), we provide as it's first argument // the address of our device pointer variable so that it can change the // value of our pointer to the correct device address. hipMalloc((void **) &a_dev, sizeof(int) * n); hipMalloc((void **) &b_dev, sizeof(int) * n); hipMalloc((void **) &result_dev, sizeof(int) * n); // Step 2: Copy the input vectors to the device hipMemcpy(a_dev, a, sizeof(int) * n, hipMemcpyHostToDevice); hipMemcpy(b_dev, b, sizeof(int) * n, hipMemcpyHostToDevice); // Step 3: Invoke the kernel // We allocate enough blocks (each 512 threads long) in the grid to // accomodate all `n` elements in the vectors. The 512 long block size // is somewhat arbitrary, but with the constraint that we know the // hardware will support blocks of that size. 
dim3 dimGrid((n + 512 - 1) / 512, 1, 1); dim3 dimBlock(512, 1, 1); add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n); // Step 4: Retrieve the results hipMemcpy(result, result_dev, sizeof(int) * n, hipMemcpyDeviceToHost); // Step 5: Free device memory hipFree(a_dev); hipFree(b_dev); hipFree(result_dev); } void print_vector(int *array, int n) { int i; for (i=0; i<n; i++) printf("%d ", array[i]); printf("\n"); } // This function print out the different in time void print_time(timeval start, timeval end) { printf("Time = %ld us\n", ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec ))); } // loop over size int vector_add_size(int size) { int n = size; // Length of the arrays int rand = n % 7; // int a[] = {0, 1, 2, 3, 4}; // int b[] = {5, 6, 7, 8, 9}; // int host_result[5]; // int device_result[5]; int *a = (int *) malloc(n * sizeof(int)); int *b = (int *) malloc(n * sizeof(int)); int *host_result = (int *) malloc(n * sizeof(int)); int *device_result = (int *) malloc(n * sizeof(int)); // verify malloc if (!(a && b && host_result && device_result)) { printf("out of memory\n"); return(-1); } struct timeval start, end; // create variable size matrix for (int i = rand; i < n; ++i) { a[i] = i; b[i] = SIZE + i; } /* int deviceCount; int device; // show cuda capability cudaGetDeviceCount(&deviceCount); for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf("Device %d has compute capability %d.%d.\n", device, deviceProp.major, deviceProp.minor); } */ // print answers: printf("The CPU's answer: "); gettimeofday(&start, NULL); add_vectors_host(host_result, a, b, n); gettimeofday(&end, NULL); // print_vector(host_result, n); print_time(start, end); printf("The GPU's answer: "); gettimeofday(&start, NULL); add_vectors_dev(device_result, a, b, n); gettimeofday(&end, NULL); // print_vector(device_result, n); print_time(start, end); // free memory free(a); 
free(b); free(host_result); free(device_result); return 0; } // main function int main(void) { int size = SIZE; int min = size; int max = 30000000; int inc = 50000; // for (int i = size; i < MAX; i*=size) for (int i = min; i <= max; i+=inc) { printf("\nsize = %d\n", i); vector_add_size(i); } return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* KAM PUI SO (ANTHONY) CS 510 GPU Homework 1 The Cross-Over Point CUDA really shines when given problems involving lots of data, but for small problems, using CUDA can be slower than a pure CPU solution. Since it can be difficult to get a feel for how large a problem needs to be before using the GPU becomes useful, this lab encourages you to find the "crossover point" for vector addition. Specifically: how large do the vectors need to be for the speed of GPU vector addition to eclipse the speed of CPU vector addition? Modify the vector_addition.cu example to time how long it takes the CPU and GPU vector addition functions to operate on vectors of different magnitudes. Find (roughly) what magnitude constitutes the cross-over point for this problem on your system. */ #include <hip/hip_runtime.h> #include <sys/time.h> #include <time.h> #include <stdio.h> const int SIZE = 2; const int MAX = 214783647 ; /* The old-fashioned CPU-only way to add two vectors */ void add_vectors_host(int *result, int *a, int *b, int n) { for (int i=0; i<n; i++) result[i] = a[i] + b[i]; } /* The kernel that will execute on the GPU */ __global__ void add_vectors_kernel(int *result, int *a, int *b, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; // If we have more threads than the magnitude of our vector, we need to // make sure that the excess threads don't try to save results into // unallocated memory. if (idx < n) result[idx] = a[idx] + b[idx]; } /* This function encapsulates the process of creating and tearing down the * environment used to execute our vector addition kernel. The steps of the * process are: * 1. Allocate memory on the device to hold our vectors * 2. Copy the vectors to device memory * 3. Execute the kernel * 4. Retrieve the result vector from the device by copying it to the host * 5. 
Free memory on the device */ void add_vectors_dev(int *result, int *a, int *b, int n) { // Step 1: Allocate memory int *a_dev, *b_dev, *result_dev; // Since cudaMalloc does not return a pointer like C's traditional malloc // (it returns a success status instead), we provide as it's first argument // the address of our device pointer variable so that it can change the // value of our pointer to the correct device address. hipMalloc((void **) &a_dev, sizeof(int) * n); hipMalloc((void **) &b_dev, sizeof(int) * n); hipMalloc((void **) &result_dev, sizeof(int) * n); // Step 2: Copy the input vectors to the device hipMemcpy(a_dev, a, sizeof(int) * n, hipMemcpyHostToDevice); hipMemcpy(b_dev, b, sizeof(int) * n, hipMemcpyHostToDevice); // Step 3: Invoke the kernel // We allocate enough blocks (each 512 threads long) in the grid to // accomodate all `n` elements in the vectors. The 512 long block size // is somewhat arbitrary, but with the constraint that we know the // hardware will support blocks of that size. 
dim3 dimGrid((n + 512 - 1) / 512, 1, 1); dim3 dimBlock(512, 1, 1); add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n); // Step 4: Retrieve the results hipMemcpy(result, result_dev, sizeof(int) * n, hipMemcpyDeviceToHost); // Step 5: Free device memory hipFree(a_dev); hipFree(b_dev); hipFree(result_dev); } void print_vector(int *array, int n) { int i; for (i=0; i<n; i++) printf("%d ", array[i]); printf("\n"); } // This function print out the different in time void print_time(timeval start, timeval end) { printf("Time = %ld us\n", ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec ))); } // loop over size int vector_add_size(int size) { int n = size; // Length of the arrays int rand = n % 7; // int a[] = {0, 1, 2, 3, 4}; // int b[] = {5, 6, 7, 8, 9}; // int host_result[5]; // int device_result[5]; int *a = (int *) malloc(n * sizeof(int)); int *b = (int *) malloc(n * sizeof(int)); int *host_result = (int *) malloc(n * sizeof(int)); int *device_result = (int *) malloc(n * sizeof(int)); // verify malloc if (!(a && b && host_result && device_result)) { printf("out of memory\n"); return(-1); } struct timeval start, end; // create variable size matrix for (int i = rand; i < n; ++i) { a[i] = i; b[i] = SIZE + i; } /* int deviceCount; int device; // show cuda capability cudaGetDeviceCount(&deviceCount); for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf("Device %d has compute capability %d.%d.\n", device, deviceProp.major, deviceProp.minor); } */ // print answers: printf("The CPU's answer: "); gettimeofday(&start, NULL); add_vectors_host(host_result, a, b, n); gettimeofday(&end, NULL); // print_vector(host_result, n); print_time(start, end); printf("The GPU's answer: "); gettimeofday(&start, NULL); add_vectors_dev(device_result, a, b, n); gettimeofday(&end, NULL); // print_vector(device_result, n); print_time(start, end); // free memory free(a); 
free(b); free(host_result); free(device_result); return 0; } // main function int main(void) { int size = SIZE; int min = size; int max = 30000000; int inc = 50000; // for (int i = size; i < MAX; i*=size) for (int i = min; i <= max; i+=inc) { printf("\nsize = %d\n", i); vector_add_size(i); } return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18add_vectors_kernelPiS_S_i .globl _Z18add_vectors_kernelPiS_S_i .p2align 8 .type _Z18add_vectors_kernelPiS_S_i,@function _Z18add_vectors_kernelPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s4, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18add_vectors_kernelPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18add_vectors_kernelPiS_S_i, .Lfunc_end0-_Z18add_vectors_kernelPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18add_vectors_kernelPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18add_vectors_kernelPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* KAM PUI SO (ANTHONY) CS 510 GPU Homework 1 The Cross-Over Point CUDA really shines when given problems involving lots of data, but for small problems, using CUDA can be slower than a pure CPU solution. Since it can be difficult to get a feel for how large a problem needs to be before using the GPU becomes useful, this lab encourages you to find the "crossover point" for vector addition. Specifically: how large do the vectors need to be for the speed of GPU vector addition to eclipse the speed of CPU vector addition? Modify the vector_addition.cu example to time how long it takes the CPU and GPU vector addition functions to operate on vectors of different magnitudes. Find (roughly) what magnitude constitutes the cross-over point for this problem on your system. */ #include <hip/hip_runtime.h> #include <sys/time.h> #include <time.h> #include <stdio.h> const int SIZE = 2; const int MAX = 214783647 ; /* The old-fashioned CPU-only way to add two vectors */ void add_vectors_host(int *result, int *a, int *b, int n) { for (int i=0; i<n; i++) result[i] = a[i] + b[i]; } /* The kernel that will execute on the GPU */ __global__ void add_vectors_kernel(int *result, int *a, int *b, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; // If we have more threads than the magnitude of our vector, we need to // make sure that the excess threads don't try to save results into // unallocated memory. if (idx < n) result[idx] = a[idx] + b[idx]; } /* This function encapsulates the process of creating and tearing down the * environment used to execute our vector addition kernel. The steps of the * process are: * 1. Allocate memory on the device to hold our vectors * 2. Copy the vectors to device memory * 3. Execute the kernel * 4. Retrieve the result vector from the device by copying it to the host * 5. 
Free memory on the device */ void add_vectors_dev(int *result, int *a, int *b, int n) { // Step 1: Allocate memory int *a_dev, *b_dev, *result_dev; // Since cudaMalloc does not return a pointer like C's traditional malloc // (it returns a success status instead), we provide as it's first argument // the address of our device pointer variable so that it can change the // value of our pointer to the correct device address. hipMalloc((void **) &a_dev, sizeof(int) * n); hipMalloc((void **) &b_dev, sizeof(int) * n); hipMalloc((void **) &result_dev, sizeof(int) * n); // Step 2: Copy the input vectors to the device hipMemcpy(a_dev, a, sizeof(int) * n, hipMemcpyHostToDevice); hipMemcpy(b_dev, b, sizeof(int) * n, hipMemcpyHostToDevice); // Step 3: Invoke the kernel // We allocate enough blocks (each 512 threads long) in the grid to // accomodate all `n` elements in the vectors. The 512 long block size // is somewhat arbitrary, but with the constraint that we know the // hardware will support blocks of that size. 
dim3 dimGrid((n + 512 - 1) / 512, 1, 1); dim3 dimBlock(512, 1, 1); add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n); // Step 4: Retrieve the results hipMemcpy(result, result_dev, sizeof(int) * n, hipMemcpyDeviceToHost); // Step 5: Free device memory hipFree(a_dev); hipFree(b_dev); hipFree(result_dev); } void print_vector(int *array, int n) { int i; for (i=0; i<n; i++) printf("%d ", array[i]); printf("\n"); } // This function print out the different in time void print_time(timeval start, timeval end) { printf("Time = %ld us\n", ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec ))); } // loop over size int vector_add_size(int size) { int n = size; // Length of the arrays int rand = n % 7; // int a[] = {0, 1, 2, 3, 4}; // int b[] = {5, 6, 7, 8, 9}; // int host_result[5]; // int device_result[5]; int *a = (int *) malloc(n * sizeof(int)); int *b = (int *) malloc(n * sizeof(int)); int *host_result = (int *) malloc(n * sizeof(int)); int *device_result = (int *) malloc(n * sizeof(int)); // verify malloc if (!(a && b && host_result && device_result)) { printf("out of memory\n"); return(-1); } struct timeval start, end; // create variable size matrix for (int i = rand; i < n; ++i) { a[i] = i; b[i] = SIZE + i; } /* int deviceCount; int device; // show cuda capability cudaGetDeviceCount(&deviceCount); for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf("Device %d has compute capability %d.%d.\n", device, deviceProp.major, deviceProp.minor); } */ // print answers: printf("The CPU's answer: "); gettimeofday(&start, NULL); add_vectors_host(host_result, a, b, n); gettimeofday(&end, NULL); // print_vector(host_result, n); print_time(start, end); printf("The GPU's answer: "); gettimeofday(&start, NULL); add_vectors_dev(device_result, a, b, n); gettimeofday(&end, NULL); // print_vector(device_result, n); print_time(start, end); // free memory free(a); 
free(b); free(host_result); free(device_result); return 0; } // main function int main(void) { int size = SIZE; int min = size; int max = 30000000; int inc = 50000; // for (int i = size; i < MAX; i*=size) for (int i = min; i <= max; i+=inc) { printf("\nsize = %d\n", i); vector_add_size(i); } return 0; }
.text .file "hw1.hip" .globl _Z16add_vectors_hostPiS_S_i # -- Begin function _Z16add_vectors_hostPiS_S_i .p2align 4, 0x90 .type _Z16add_vectors_hostPiS_S_i,@function _Z16add_vectors_hostPiS_S_i: # @_Z16add_vectors_hostPiS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_3 # %bb.1: # %.lr.ph.preheader movl %ecx, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rdx,%rcx,4), %r8d addl (%rsi,%rcx,4), %r8d movl %r8d, (%rdi,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_2 .LBB0_3: # %._crit_edge retq .Lfunc_end0: .size _Z16add_vectors_hostPiS_S_i, .Lfunc_end0-_Z16add_vectors_hostPiS_S_i .cfi_endproc # -- End function .globl _Z33__device_stub__add_vectors_kernelPiS_S_i # -- Begin function _Z33__device_stub__add_vectors_kernelPiS_S_i .p2align 4, 0x90 .type _Z33__device_stub__add_vectors_kernelPiS_S_i,@function _Z33__device_stub__add_vectors_kernelPiS_S_i: # @_Z33__device_stub__add_vectors_kernelPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18add_vectors_kernelPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z33__device_stub__add_vectors_kernelPiS_S_i, .Lfunc_end1-_Z33__device_stub__add_vectors_kernelPiS_S_i .cfi_endproc # -- End function .globl _Z15add_vectors_devPiS_S_i # -- Begin function _Z15add_vectors_devPiS_S_i .p2align 4, 0x90 .type _Z15add_vectors_devPiS_S_i,@function _Z15add_vectors_devPiS_S_i: # 
@_Z15add_vectors_devPiS_S_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movq %rdx, %r12 movq %rsi, %r13 movq %rdi, %r14 movslq %ecx, %r15 leaq (,%r15,4), %rbx leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 24(%rsp), %rdi movq %r13, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r12, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy leal 511(%r15), %eax addl $1022, %r15d # imm = 0x3FE testl %eax, %eax cmovnsl %eax, %r15d sarl $9, %r15d movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %r15 orq $512, %rdx # imm = 0x200 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_2 # %bb.1: movq 8(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl %ebp, 36(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18add_vectors_kernelPiS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_2: movq 8(%rsp), %rsi movq %r14, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq 24(%rsp), 
%rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z15add_vectors_devPiS_S_i, .Lfunc_end2-_Z15add_vectors_devPiS_S_i .cfi_endproc # -- End function .globl _Z12print_vectorPii # -- Begin function _Z12print_vectorPii .p2align 4, 0x90 .type _Z12print_vectorPii,@function _Z12print_vectorPii: # @_Z12print_vectorPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB3_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r15 cmpq %r15, %r14 jne .LBB3_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB3_4: # %._crit_edge movl $10, %edi jmp putchar@PLT # TAILCALL .Lfunc_end3: .size _Z12print_vectorPii, .Lfunc_end3-_Z12print_vectorPii .cfi_endproc # -- End function .globl _Z10print_time7timevalS_ # -- Begin function _Z10print_time7timevalS_ .p2align 4, 0x90 .type _Z10print_time7timevalS_,@function _Z10print_time7timevalS_: # @_Z10print_time7timevalS_ .cfi_startproc # %bb.0: subq %rdi, %rdx imulq $1000000, %rdx, %rax # imm = 0xF4240 subq %rsi, %rcx addq %rcx, %rax movl $.L.str.2, %edi movq %rax, %rsi xorl %eax, %eax jmp printf # TAILCALL .Lfunc_end4: .size _Z10print_time7timevalS_, .Lfunc_end4-_Z10print_time7timevalS_ .cfi_endproc # -- End function .globl _Z15vector_add_sizei # -- Begin function _Z15vector_add_sizei .p2align 4, 0x90 
.type _Z15vector_add_sizei,@function _Z15vector_add_sizei: # @_Z15vector_add_sizei .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movslq %edi, %r12 leaq (,%r12,4), %r15 movq %r15, %rdi callq malloc movq %rax, %rbx movq %r15, %rdi callq malloc movq %rax, %r14 movq %r15, %rdi callq malloc testq %rbx, %rbx je .LBB5_3 # %bb.1: testq %r14, %r14 je .LBB5_3 # %bb.2: movq %rax, %r15 testq %rax, %rax je .LBB5_3 # %bb.4: imulq $-1840700269, %r12, %rax # imm = 0x92492493 shrq $32, %rax addl %ebp, %eax movl %eax, %ecx shrl $31, %ecx sarl $2, %eax addl %ecx, %eax leal (,%rax,8), %ecx subl %ecx, %eax addl %ebp, %eax cmpl %r12d, %eax jge .LBB5_7 # %bb.5: # %.lr.ph.preheader cltq .p2align 4, 0x90 .LBB5_6: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) leal 2(%rax), %ecx movl %ecx, (%r14,%rax,4) incq %rax cmpq %rax, %r12 jne .LBB5_6 .LBB5_7: # %._crit_edge xorl %r12d, %r12d movl $.L.str.4, %edi xorl %eax, %eax callq printf leaq 24(%rsp), %r13 movq %r13, %rdi xorl %esi, %esi callq gettimeofday leaq 8(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 8(%rsp), %rax movq 16(%rsp), %rsi subq 24(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 subq 32(%rsp), %rsi addq %rax, %rsi movl $.L.str.2, %edi xorl %eax, %eax callq printf movl $.L.str.5, %edi xorl %eax, %eax callq printf movq %r13, %rdi xorl %esi, %esi callq gettimeofday movq %r15, %rdi movq %rbx, %rsi movq %r14, %rdx movl %ebp, %ecx callq _Z15add_vectors_devPiS_S_i leaq 8(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 8(%rsp), %rax movq 16(%rsp), %rsi subq 24(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 subq 32(%rsp), %rsi 
addq %rax, %rsi movl $.L.str.2, %edi xorl %eax, %eax callq printf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free jmp .LBB5_8 .LBB5_3: movl $.Lstr, %edi callq puts@PLT movl $-1, %r12d .LBB5_8: movl %r12d, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z15vector_add_sizei, .Lfunc_end5-_Z15vector_add_sizei .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl $-49998, %ebx # imm = 0xFFFF3CB2 .p2align 4, 0x90 .LBB6_1: # =>This Inner Loop Header: Depth=1 addl $50000, %ebx # imm = 0xC350 movl $.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf movl %ebx, %edi callq _Z15vector_add_sizei cmpl $29950001, %ebx # imm = 0x1C90031 jb .LBB6_1 # %bb.2: xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size main, .Lfunc_end6-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB7_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB7_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18add_vectors_kernelPiS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end7: .size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # 
-- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB8_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB8_2: retq .Lfunc_end8: .size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor .cfi_endproc # -- End function .type _Z18add_vectors_kernelPiS_S_i,@object # @_Z18add_vectors_kernelPiS_S_i .section .rodata,"a",@progbits .globl _Z18add_vectors_kernelPiS_S_i .p2align 3, 0x0 _Z18add_vectors_kernelPiS_S_i: .quad _Z33__device_stub__add_vectors_kernelPiS_S_i .size _Z18add_vectors_kernelPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d " .size .L.str, 4 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Time = %ld us\n" .size .L.str.2, 15 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "The CPU's answer: " .size .L.str.4, 19 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "The GPU's answer: " .size .L.str.5, 19 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "\nsize = %d\n" .size .L.str.6, 12 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18add_vectors_kernelPiS_S_i" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "out of memory" .size .Lstr, 14 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__add_vectors_kernelPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18add_vectors_kernelPiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z18add_vectors_kernelPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fe200078e0207 */ /*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18add_vectors_kernelPiS_S_i .globl _Z18add_vectors_kernelPiS_S_i .p2align 8 .type _Z18add_vectors_kernelPiS_S_i,@function _Z18add_vectors_kernelPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s4, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18add_vectors_kernelPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18add_vectors_kernelPiS_S_i, .Lfunc_end0-_Z18add_vectors_kernelPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18add_vectors_kernelPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18add_vectors_kernelPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000173c9_00000000-6_hw1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2065: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2065: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z16add_vectors_hostPiS_S_i .type _Z16add_vectors_hostPiS_S_i, @function _Z16add_vectors_hostPiS_S_i: .LFB2057: .cfi_startproc endbr64 testl %ecx, %ecx jle .L3 movslq %ecx, %rcx leaq 0(,%rcx,4), %r8 movl $0, %eax .L5: movl (%rdx,%rax), %ecx addl (%rsi,%rax), %ecx movl %ecx, (%rdi,%rax) addq $4, %rax cmpq %r8, %rax jne .L5 .L3: ret .cfi_endproc .LFE2057: .size _Z16add_vectors_hostPiS_S_i, .-_Z16add_vectors_hostPiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d " .LC1: .string "\n" .text .globl _Z12print_vectorPii .type _Z12print_vectorPii, @function _Z12print_vectorPii: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 testl %esi, %esi jle .L8 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r12 leaq .LC0(%rip), %rbp .L9: movl (%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L9 .L8: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z12print_vectorPii, .-_Z12print_vectorPii .section .rodata.str1.1 .LC2: .string "Time = %ld us\n" .text .globl _Z10print_time7timevalS_ .type _Z10print_time7timevalS_, @function _Z10print_time7timevalS_: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 imulq $1000000, %rdx, %rdx addq %rcx, %rdx imulq 
$1000000, %rdi, %rdi addq %rsi, %rdi subq %rdi, %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z10print_time7timevalS_, .-_Z10print_time7timevalS_ .globl _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i .type _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i, @function _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 136(%rsp), %rax subq %fs:40, %rax jne .L19 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18add_vectors_kernelPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i, .-_Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i .globl _Z18add_vectors_kernelPiS_S_i .type _Z18add_vectors_kernelPiS_S_i, @function _Z18add_vectors_kernelPiS_S_i: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size 
_Z18add_vectors_kernelPiS_S_i, .-_Z18add_vectors_kernelPiS_S_i .globl _Z15add_vectors_devPiS_S_i .type _Z15add_vectors_devPiS_S_i, @function _Z15add_vectors_devPiS_S_i: .LFB2058: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %rdi, %r12 movq %rsi, %r14 movq %rdx, %r13 movl %ecx, %ebp movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %ecx, %rbx salq $2, %rbx leaq 8(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leal 1022(%rbp), %eax movl %ebp, %edx addl $511, %edx cmovns %edx, %eax sarl $9, %eax movl %eax, 32(%rsp) movl $1, 36(%rsp) movl $512, 44(%rsp) movl $1, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L23: movl $2, %ecx movq %rbx, %rdx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L27 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state movl %ebp, %ecx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq 24(%rsp), %rdi call _Z43__device_stub__Z18add_vectors_kernelPiS_S_iPiS_S_i jmp .L23 .L27: call __stack_chk_fail@PLT 
.cfi_endproc .LFE2058: .size _Z15add_vectors_devPiS_S_i, .-_Z15add_vectors_devPiS_S_i .section .rodata.str1.1 .LC3: .string "out of memory\n" .LC4: .string "The CPU's answer: " .LC5: .string "The GPU's answer: " .text .globl _Z15vector_add_sizei .type _Z15vector_add_sizei, @function _Z15vector_add_sizei: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movl %edi, %ebx movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %edi, %r13 imulq $-1840700269, %r13, %r15 shrq $32, %r15 addl %edi, %r15d sarl $2, %r15d movl %edi, %eax sarl $31, %eax subl %eax, %r15d leal 0(,%r15,8), %eax subl %r15d, %eax movl %edi, %r15d subl %eax, %r15d salq $2, %r13 movq %r13, %rdi call malloc@PLT movq %rax, %r12 movq %r13, %rdi call malloc@PLT movq %rax, %rbp movq %r13, %rdi call malloc@PLT movq %rax, %r14 movq %r13, %rdi call malloc@PLT testq %r12, %r12 je .L29 movq %rax, %r13 testq %rbp, %rbp je .L29 testq %r14, %r14 je .L29 testq %rax, %rax je .L29 movslq %r15d, %rax cmpl %r15d, %ebx jle .L31 .L33: movl %eax, (%r12,%rax,4) leal 2(%rax), %edx movl %edx, 0(%rbp,%rax,4) addq $1, %rax cmpl %eax, %ebx jg .L33 .L31: leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 16(%rsp), %r15 movl $0, %esi movq %r15, %rdi call gettimeofday@PLT movl %ebx, %ecx movq %rbp, %rdx movq %r12, %rsi movq %r14, %rdi call _Z16add_vectors_hostPiS_S_i leaq 32(%rsp), %rax movl $0, %esi movq %rax, 8(%rsp) movq %rax, %rdi call gettimeofday@PLT movq 32(%rsp), %rdx movq 40(%rsp), %rcx movq 16(%rsp), %rdi movq 24(%rsp), %rsi call _Z10print_time7timevalS_ leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %esi movq %r15, 
%rdi call gettimeofday@PLT movl %ebx, %ecx movq %rbp, %rdx movq %r12, %rsi movq %r13, %rdi call _Z15add_vectors_devPiS_S_i movl $0, %esi movq 8(%rsp), %rdi call gettimeofday@PLT movq 32(%rsp), %rdx movq 40(%rsp), %rcx movq 16(%rsp), %rdi movq 24(%rsp), %rsi call _Z10print_time7timevalS_ movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r14, %rdi call free@PLT movq %r13, %rdi call free@PLT movl $0, %eax .L28: movq 56(%rsp), %rdx subq %fs:40, %rdx jne .L38 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L28 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z15vector_add_sizei, .-_Z15vector_add_sizei .section .rodata.str1.1 .LC6: .string "\nsize = %d\n" .text .globl main .type main, @function main: .LFB2062: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movl $2, %ebx leaq .LC6(%rip), %rbp .L40: movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebx, %edi call _Z15vector_add_sizei addl $50000, %ebx cmpl $30000002, %ebx jne .L40 movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z18add_vectors_kernelPiS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 
24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z18add_vectors_kernelPiS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hw1.hip" .globl _Z16add_vectors_hostPiS_S_i # -- Begin function _Z16add_vectors_hostPiS_S_i .p2align 4, 0x90 .type _Z16add_vectors_hostPiS_S_i,@function _Z16add_vectors_hostPiS_S_i: # @_Z16add_vectors_hostPiS_S_i .cfi_startproc # %bb.0: testl %ecx, %ecx jle .LBB0_3 # %bb.1: # %.lr.ph.preheader movl %ecx, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rdx,%rcx,4), %r8d addl (%rsi,%rcx,4), %r8d movl %r8d, (%rdi,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB0_2 .LBB0_3: # %._crit_edge retq .Lfunc_end0: .size _Z16add_vectors_hostPiS_S_i, .Lfunc_end0-_Z16add_vectors_hostPiS_S_i .cfi_endproc # -- End function .globl _Z33__device_stub__add_vectors_kernelPiS_S_i # -- Begin function _Z33__device_stub__add_vectors_kernelPiS_S_i .p2align 4, 0x90 .type _Z33__device_stub__add_vectors_kernelPiS_S_i,@function _Z33__device_stub__add_vectors_kernelPiS_S_i: # @_Z33__device_stub__add_vectors_kernelPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18add_vectors_kernelPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z33__device_stub__add_vectors_kernelPiS_S_i, .Lfunc_end1-_Z33__device_stub__add_vectors_kernelPiS_S_i .cfi_endproc # -- End function .globl _Z15add_vectors_devPiS_S_i # -- Begin function _Z15add_vectors_devPiS_S_i .p2align 4, 0x90 .type _Z15add_vectors_devPiS_S_i,@function _Z15add_vectors_devPiS_S_i: # 
@_Z15add_vectors_devPiS_S_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movq %rdx, %r12 movq %rsi, %r13 movq %rdi, %r14 movslq %ecx, %r15 leaq (,%r15,4), %rbx leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 24(%rsp), %rdi movq %r13, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r12, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy leal 511(%r15), %eax addl $1022, %r15d # imm = 0x3FE testl %eax, %eax cmovnsl %eax, %r15d sarl $9, %r15d movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %r15 orq $512, %rdx # imm = 0x200 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_2 # %bb.1: movq 8(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl %ebp, 36(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z18add_vectors_kernelPiS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_2: movq 8(%rsp), %rsi movq %r14, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq 24(%rsp), 
%rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z15add_vectors_devPiS_S_i, .Lfunc_end2-_Z15add_vectors_devPiS_S_i .cfi_endproc # -- End function .globl _Z12print_vectorPii # -- Begin function _Z12print_vectorPii .p2align 4, 0x90 .type _Z12print_vectorPii,@function _Z12print_vectorPii: # @_Z12print_vectorPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB3_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r15 cmpq %r15, %r14 jne .LBB3_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB3_4: # %._crit_edge movl $10, %edi jmp putchar@PLT # TAILCALL .Lfunc_end3: .size _Z12print_vectorPii, .Lfunc_end3-_Z12print_vectorPii .cfi_endproc # -- End function .globl _Z10print_time7timevalS_ # -- Begin function _Z10print_time7timevalS_ .p2align 4, 0x90 .type _Z10print_time7timevalS_,@function _Z10print_time7timevalS_: # @_Z10print_time7timevalS_ .cfi_startproc # %bb.0: subq %rdi, %rdx imulq $1000000, %rdx, %rax # imm = 0xF4240 subq %rsi, %rcx addq %rcx, %rax movl $.L.str.2, %edi movq %rax, %rsi xorl %eax, %eax jmp printf # TAILCALL .Lfunc_end4: .size _Z10print_time7timevalS_, .Lfunc_end4-_Z10print_time7timevalS_ .cfi_endproc # -- End function .globl _Z15vector_add_sizei # -- Begin function _Z15vector_add_sizei .p2align 4, 0x90 
.type _Z15vector_add_sizei,@function _Z15vector_add_sizei: # @_Z15vector_add_sizei .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movslq %edi, %r12 leaq (,%r12,4), %r15 movq %r15, %rdi callq malloc movq %rax, %rbx movq %r15, %rdi callq malloc movq %rax, %r14 movq %r15, %rdi callq malloc testq %rbx, %rbx je .LBB5_3 # %bb.1: testq %r14, %r14 je .LBB5_3 # %bb.2: movq %rax, %r15 testq %rax, %rax je .LBB5_3 # %bb.4: imulq $-1840700269, %r12, %rax # imm = 0x92492493 shrq $32, %rax addl %ebp, %eax movl %eax, %ecx shrl $31, %ecx sarl $2, %eax addl %ecx, %eax leal (,%rax,8), %ecx subl %ecx, %eax addl %ebp, %eax cmpl %r12d, %eax jge .LBB5_7 # %bb.5: # %.lr.ph.preheader cltq .p2align 4, 0x90 .LBB5_6: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) leal 2(%rax), %ecx movl %ecx, (%r14,%rax,4) incq %rax cmpq %rax, %r12 jne .LBB5_6 .LBB5_7: # %._crit_edge xorl %r12d, %r12d movl $.L.str.4, %edi xorl %eax, %eax callq printf leaq 24(%rsp), %r13 movq %r13, %rdi xorl %esi, %esi callq gettimeofday leaq 8(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 8(%rsp), %rax movq 16(%rsp), %rsi subq 24(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 subq 32(%rsp), %rsi addq %rax, %rsi movl $.L.str.2, %edi xorl %eax, %eax callq printf movl $.L.str.5, %edi xorl %eax, %eax callq printf movq %r13, %rdi xorl %esi, %esi callq gettimeofday movq %r15, %rdi movq %rbx, %rsi movq %r14, %rdx movl %ebp, %ecx callq _Z15add_vectors_devPiS_S_i leaq 8(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 8(%rsp), %rax movq 16(%rsp), %rsi subq 24(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 subq 32(%rsp), %rsi 
addq %rax, %rsi movl $.L.str.2, %edi xorl %eax, %eax callq printf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free jmp .LBB5_8 .LBB5_3: movl $.Lstr, %edi callq puts@PLT movl $-1, %r12d .LBB5_8: movl %r12d, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z15vector_add_sizei, .Lfunc_end5-_Z15vector_add_sizei .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl $-49998, %ebx # imm = 0xFFFF3CB2 .p2align 4, 0x90 .LBB6_1: # =>This Inner Loop Header: Depth=1 addl $50000, %ebx # imm = 0xC350 movl $.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf movl %ebx, %edi callq _Z15vector_add_sizei cmpl $29950001, %ebx # imm = 0x1C90031 jb .LBB6_1 # %bb.2: xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size main, .Lfunc_end6-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB7_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB7_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18add_vectors_kernelPiS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end7: .size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # 
-- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB8_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB8_2: retq .Lfunc_end8: .size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor .cfi_endproc # -- End function .type _Z18add_vectors_kernelPiS_S_i,@object # @_Z18add_vectors_kernelPiS_S_i .section .rodata,"a",@progbits .globl _Z18add_vectors_kernelPiS_S_i .p2align 3, 0x0 _Z18add_vectors_kernelPiS_S_i: .quad _Z33__device_stub__add_vectors_kernelPiS_S_i .size _Z18add_vectors_kernelPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d " .size .L.str, 4 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Time = %ld us\n" .size .L.str.2, 15 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "The CPU's answer: " .size .L.str.4, 19 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "The GPU's answer: " .size .L.str.5, 19 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "\nsize = %d\n" .size .L.str.6, 12 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18add_vectors_kernelPiS_S_i" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "out of memory" .size .Lstr, 14 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__add_vectors_kernelPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18add_vectors_kernelPiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// // Created by root on 2020/11/11. // #include "cuda_runtime.h" #include <stdio.h> __global__ void LocateThreadIdKernel() { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; // printf("%d, %d. %d\n", threadIdx.x, threadIdx.y, threadIdx.z); printf("Thread coordinate: (%d, %d, %d)\n", x, y, z); } int main () { int x = 10, y = 15, z = 20; dim3 block(2, 3, 4); dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y, (z + block.z - 1) / block.z); LocateThreadIdKernel<<<grid, block>>>(); cudaDeviceSynchronize(); return 0; }
code for sm_80 Function : _Z20LocateThreadIdKernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e220000002600 */ /*0020*/ MOV R8, 0x0 ; /* 0x0000000000087802 */ /* 0x000fe40000000f00 */ /*0030*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */ /* 0x000fe20007ffe0ff */ /*0040*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e260000002200 */ /*0050*/ LDC.64 R8, c[0x4][R8] ; /* 0x0100000008087b82 */ /* 0x000e620000000a00 */ /*0060*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000ea20000002500 */ /*0070*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fc60007f1e0ff */ /*0080*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000ea80000002100 */ /*0090*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */ /* 0x000ee80000002700 */ /*00a0*/ S2R R7, SR_TID.Z ; /* 0x0000000000077919 */ /* 0x000ee20000002300 */ /*00b0*/ IMAD R3, R3, c[0x0][0x4], R0 ; /* 0x0000010003037a24 */ /* 0x001fe400078e0200 */ /*00c0*/ IMAD R2, R2, c[0x0][0x0], R5 ; /* 0x0000000002027a24 */ /* 0x004fc400078e0205 */ /*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fc600078e00ff */ /*00e0*/ STL.64 [R1], R2 ; /* 0x0000000201007387 */ /* 0x0001e20000100a00 */ /*00f0*/ IMAD R0, R4, c[0x0][0x8], R7 ; /* 0x0000020004007a24 */ /* 0x008fe400078e0207 */ /*0100*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe400078e00ff */ /*0110*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fe200000e06ff */ /*0120*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */ /* 0x0001e80000100800 */ /*0130*/ LEPC R2 ; /* 0x000000000002734e */ /* 0x003fe40000000000 */ /*0140*/ MOV R11, 0x1b0 ; /* 0x000001b0000b7802 */ /* 0x000fe40000000f00 */ /*0150*/ MOV R20, 0x130 ; /* 0x0000013000147802 */ /* 
0x000fe40000000f00 */ /*0160*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fc40000000f00 */ /*0170*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0180*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e102 */ /*0190*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2503 */ /*01a0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x000fea0003c00000 */ /*01b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// // Created by root on 2020/11/11. // #include "cuda_runtime.h" #include <stdio.h> __global__ void LocateThreadIdKernel() { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; // printf("%d, %d. %d\n", threadIdx.x, threadIdx.y, threadIdx.z); printf("Thread coordinate: (%d, %d, %d)\n", x, y, z); } int main () { int x = 10, y = 15, z = 20; dim3 block(2, 3, 4); dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y, (z + block.z - 1) / block.z); LocateThreadIdKernel<<<grid, block>>>(); cudaDeviceSynchronize(); return 0; }
.file "tmpxft_0017d89a_00000000-6_ThreadIdLocate.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z20LocateThreadIdKernelvv .type _Z39__device_stub__Z20LocateThreadIdKernelvv, @function _Z39__device_stub__Z20LocateThreadIdKernelvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z20LocateThreadIdKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z20LocateThreadIdKernelvv, .-_Z39__device_stub__Z20LocateThreadIdKernelvv .globl _Z20LocateThreadIdKernelv .type _Z20LocateThreadIdKernelv, @function _Z20LocateThreadIdKernelv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z20LocateThreadIdKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z20LocateThreadIdKernelv, .-_Z20LocateThreadIdKernelv .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp 
.cfi_def_cfa_offset 48 movl $5, 20(%rsp) movl $5, 24(%rsp) movl $2, 8(%rsp) movl $3, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $4, %ecx movq 20(%rsp), %rdi movl $5, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z39__device_stub__Z20LocateThreadIdKernelvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z20LocateThreadIdKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z20LocateThreadIdKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: 
.long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// // Created by root on 2020/11/11. // #include "cuda_runtime.h" #include <stdio.h> __global__ void LocateThreadIdKernel() { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; // printf("%d, %d. %d\n", threadIdx.x, threadIdx.y, threadIdx.z); printf("Thread coordinate: (%d, %d, %d)\n", x, y, z); } int main () { int x = 10, y = 15, z = 20; dim3 block(2, 3, 4); dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y, (z + block.z - 1) / block.z); LocateThreadIdKernel<<<grid, block>>>(); cudaDeviceSynchronize(); return 0; }
// // Created by root on 2020/11/11. // #include "hip/hip_runtime.h" #include <stdio.h> __global__ void LocateThreadIdKernel() { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; // printf("%d, %d. %d\n", threadIdx.x, threadIdx.y, threadIdx.z); printf("Thread coordinate: (%d, %d, %d)\n", x, y, z); } int main () { int x = 10, y = 15, z = 20; dim3 block(2, 3, 4); dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y, (z + block.z - 1) / block.z); LocateThreadIdKernel<<<grid, block>>>(); hipDeviceSynchronize(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// // Created by root on 2020/11/11. // #include "hip/hip_runtime.h" #include <stdio.h> __global__ void LocateThreadIdKernel() { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; // printf("%d, %d. %d\n", threadIdx.x, threadIdx.y, threadIdx.z); printf("Thread coordinate: (%d, %d, %d)\n", x, y, z); } int main () { int x = 10, y = 15, z = 20; dim3 block(2, 3, 4); dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y, (z + block.z - 1) / block.z); LocateThreadIdKernel<<<grid, block>>>(); hipDeviceSynchronize(); return 0; }
.text .file "ThreadIdLocate.hip" .globl _Z35__device_stub__LocateThreadIdKernelv # -- Begin function _Z35__device_stub__LocateThreadIdKernelv .p2align 4, 0x90 .type _Z35__device_stub__LocateThreadIdKernelv,@function _Z35__device_stub__LocateThreadIdKernelv: # @_Z35__device_stub__LocateThreadIdKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z20LocateThreadIdKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z35__device_stub__LocateThreadIdKernelv, .Lfunc_end0-_Z35__device_stub__LocateThreadIdKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movabsq $21474836485, %rdi # imm = 0x500000005 movabsq $12884901890, %rdx # imm = 0x300000002 movl $5, %esi movl $4, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z20LocateThreadIdKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp 
.cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z20LocateThreadIdKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z20LocateThreadIdKernelv,@object # @_Z20LocateThreadIdKernelv .section .rodata,"a",@progbits .globl _Z20LocateThreadIdKernelv .p2align 3, 0x0 _Z20LocateThreadIdKernelv: .quad _Z35__device_stub__LocateThreadIdKernelv .size _Z20LocateThreadIdKernelv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z20LocateThreadIdKernelv" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z35__device_stub__LocateThreadIdKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z20LocateThreadIdKernelv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0017d89a_00000000-6_ThreadIdLocate.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z20LocateThreadIdKernelvv .type _Z39__device_stub__Z20LocateThreadIdKernelvv, @function _Z39__device_stub__Z20LocateThreadIdKernelvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z20LocateThreadIdKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z20LocateThreadIdKernelvv, .-_Z39__device_stub__Z20LocateThreadIdKernelvv .globl _Z20LocateThreadIdKernelv .type _Z20LocateThreadIdKernelv, @function _Z20LocateThreadIdKernelv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z20LocateThreadIdKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z20LocateThreadIdKernelv, .-_Z20LocateThreadIdKernelv .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp 
.cfi_def_cfa_offset 48 movl $5, 20(%rsp) movl $5, 24(%rsp) movl $2, 8(%rsp) movl $3, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $4, %ecx movq 20(%rsp), %rdi movl $5, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z39__device_stub__Z20LocateThreadIdKernelvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z20LocateThreadIdKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z20LocateThreadIdKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: 
.long 0x3 3: .align 8 4:
.text .file "ThreadIdLocate.hip" .globl _Z35__device_stub__LocateThreadIdKernelv # -- Begin function _Z35__device_stub__LocateThreadIdKernelv .p2align 4, 0x90 .type _Z35__device_stub__LocateThreadIdKernelv,@function _Z35__device_stub__LocateThreadIdKernelv: # @_Z35__device_stub__LocateThreadIdKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z20LocateThreadIdKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z35__device_stub__LocateThreadIdKernelv, .Lfunc_end0-_Z35__device_stub__LocateThreadIdKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movabsq $21474836485, %rdi # imm = 0x500000005 movabsq $12884901890, %rdx # imm = 0x300000002 movl $5, %esi movl $4, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z20LocateThreadIdKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp 
.cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z20LocateThreadIdKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z20LocateThreadIdKernelv,@object # @_Z20LocateThreadIdKernelv .section .rodata,"a",@progbits .globl _Z20LocateThreadIdKernelv .p2align 3, 0x0 _Z20LocateThreadIdKernelv: .quad _Z35__device_stub__LocateThreadIdKernelv .size _Z20LocateThreadIdKernelv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z20LocateThreadIdKernelv" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z35__device_stub__LocateThreadIdKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z20LocateThreadIdKernelv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
code for sm_80 Function : _Z14copySimilarityPfiiPiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R2, R7, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fcc00078e0207 */ /*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ MOV R9, c[0x0][0x16c] ; /* 0x00005b0000097a02 */ /* 0x000fca0000000f00 */ /*00b0*/ IMAD R4, R9, c[0x0][0x17c], R2 ; /* 0x00005f0009047a24 */ /* 0x004fc800078e0202 */ /*00c0*/ IMAD.WIDE R4, R4, R7, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0207 */ /*00d0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD R6, R9, c[0x0][0x178], R2 ; /* 0x00005e0009067a24 */ /* 0x000fc800078e0202 */ /*00f0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fca00078e0207 */ /*0100*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
.file "tmpxft_00028369_00000000-6_copySimilarity.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii .type _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii, @function _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii: .LFB2051: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movq %rcx, 8(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movq %rsp, %rax movq %rax, 136(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 184 pushq 40(%rsp) .cfi_def_cfa_offset 192 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14copySimilarityPfiiPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii, .-_Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii .globl 
_Z14copySimilarityPfiiPiii .type _Z14copySimilarityPfiiPiii, @function _Z14copySimilarityPfiiPiii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z14copySimilarityPfiiPiii, .-_Z14copySimilarityPfiiPiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z14copySimilarityPfiiPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z14copySimilarityPfiiPiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14copySimilarityPfiiPiii .globl _Z14copySimilarityPfiiPiii .p2align 8 .type _Z14copySimilarityPfiiPiii,@function _Z14copySimilarityPfiiPiii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo global_load_b32 v0, v[0:1], off s_clause 0x1 s_load_b32 s2, s[0:1], 0xc s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt vmcnt(0) lgkmcnt(0) v_mad_u64_u32 v[1:2], null, s7, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_load_b32 v3, v[1:2], off v_mad_u64_u32 v[1:2], null, s6, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v3, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14copySimilarityPfiiPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 
.amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14copySimilarityPfiiPiii, .Lfunc_end0-_Z14copySimilarityPfiiPiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - 
.offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14copySimilarityPfiiPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14copySimilarityPfiiPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= active_patches) return; int patch = activeMask[i]; similarities[target*patches + patch] = similarities[source*patches + patch]; }
.text .file "copySimilarity.hip" .globl _Z29__device_stub__copySimilarityPfiiPiii # -- Begin function _Z29__device_stub__copySimilarityPfiiPiii .p2align 4, 0x90 .type _Z29__device_stub__copySimilarityPfiiPiii,@function _Z29__device_stub__copySimilarityPfiiPiii: # @_Z29__device_stub__copySimilarityPfiiPiii .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movl %edx, 8(%rsp) movq %rcx, 64(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14copySimilarityPfiiPiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z29__device_stub__copySimilarityPfiiPiii, .Lfunc_end0-_Z29__device_stub__copySimilarityPfiiPiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14copySimilarityPfiiPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size 
__hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z14copySimilarityPfiiPiii,@object # @_Z14copySimilarityPfiiPiii .section .rodata,"a",@progbits .globl _Z14copySimilarityPfiiPiii .p2align 3, 0x0 _Z14copySimilarityPfiiPiii: .quad _Z29__device_stub__copySimilarityPfiiPiii .size _Z14copySimilarityPfiiPiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14copySimilarityPfiiPiii" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__copySimilarityPfiiPiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14copySimilarityPfiiPiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym 
__hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14copySimilarityPfiiPiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R2, R7, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fcc00078e0207 */ /*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ MOV R9, c[0x0][0x16c] ; /* 0x00005b0000097a02 */ /* 0x000fca0000000f00 */ /*00b0*/ IMAD R4, R9, c[0x0][0x17c], R2 ; /* 0x00005f0009047a24 */ /* 0x004fc800078e0202 */ /*00c0*/ IMAD.WIDE R4, R4, R7, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fcc00078e0207 */ /*00d0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD R6, R9, c[0x0][0x178], R2 ; /* 0x00005e0009067a24 */ /* 0x000fc800078e0202 */ /*00f0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fca00078e0207 */ /*0100*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; 
/* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14copySimilarityPfiiPiii .globl _Z14copySimilarityPfiiPiii .p2align 8 .type _Z14copySimilarityPfiiPiii,@function _Z14copySimilarityPfiiPiii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x10 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo global_load_b32 v0, v[0:1], off s_clause 0x1 s_load_b32 s2, s[0:1], 0xc s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt vmcnt(0) lgkmcnt(0) v_mad_u64_u32 v[1:2], null, s7, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_load_b32 v3, v[1:2], off v_mad_u64_u32 v[1:2], null, s6, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v3, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14copySimilarityPfiiPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 
.amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14copySimilarityPfiiPiii, .Lfunc_end0-_Z14copySimilarityPfiiPiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - 
.offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14copySimilarityPfiiPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14copySimilarityPfiiPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00028369_00000000-6_copySimilarity.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii .type _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii, @function _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii: .LFB2051: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movq %rcx, 8(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movq %rsp, %rax movq %rax, 136(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 184 pushq 40(%rsp) .cfi_def_cfa_offset 192 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14copySimilarityPfiiPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii, .-_Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii .globl 
_Z14copySimilarityPfiiPiii .type _Z14copySimilarityPfiiPiii, @function _Z14copySimilarityPfiiPiii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z14copySimilarityPfiiPiiiPfiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z14copySimilarityPfiiPiii, .-_Z14copySimilarityPfiiPiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z14copySimilarityPfiiPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z14copySimilarityPfiiPiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "copySimilarity.hip" .globl _Z29__device_stub__copySimilarityPfiiPiii # -- Begin function _Z29__device_stub__copySimilarityPfiiPiii .p2align 4, 0x90 .type _Z29__device_stub__copySimilarityPfiiPiii,@function _Z29__device_stub__copySimilarityPfiiPiii: # @_Z29__device_stub__copySimilarityPfiiPiii .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movl %edx, 8(%rsp) movq %rcx, 64(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14copySimilarityPfiiPiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z29__device_stub__copySimilarityPfiiPiii, .Lfunc_end0-_Z29__device_stub__copySimilarityPfiiPiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14copySimilarityPfiiPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size 
__hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z14copySimilarityPfiiPiii,@object # @_Z14copySimilarityPfiiPiii .section .rodata,"a",@progbits .globl _Z14copySimilarityPfiiPiii .p2align 3, 0x0 _Z14copySimilarityPfiiPiii: .quad _Z29__device_stub__copySimilarityPfiiPiii .size _Z14copySimilarityPfiiPiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14copySimilarityPfiiPiii" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__copySimilarityPfiiPiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14copySimilarityPfiiPiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym 
__hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024 * 128 * 512)   /* 67,108,864 ints = 256 MB per buffer */

/*
 * Copy kernel: with the <<<256, 512>>> launch in main(), each thread copies
 * one contiguous run of 512 ints (2048 bytes), so the grid exactly tiles
 * SIZE elements with no overlap.
 *
 * BUG FIX: the original used `t = i + 2048`, i.e. 2048 *ints* per thread,
 * while consecutive threads start only 512 ints apart.  That copied every
 * element four times and ran threads 509-511 of the last block up to 1536
 * elements past the end of both buffers (out-of-bounds read and write).
 * 2048 was evidently meant as a byte count; 512 ints is the matching
 * element count.
 */
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * 512 + threadIdx.x * 512;
    int t = i + 512;              /* one 2048-byte chunk per thread */
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

/* Abort with file/line and the runtime's error string if a CUDA call fails;
 * without this, allocation or copy failures are silently ignored. */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main(void)
{
    int *arr;        /* host source buffer */
    int *arr2;       /* host destination, copied back from device */
    int *carr = 0;   /* device source */
    int *carr2 = 0;  /* device destination */

    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (arr == NULL || arr2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    CUDA_CHECK(cudaMalloc((void **)&carr2, sizeof(int) * SIZE));
    CUDA_CHECK(cudaMalloc((void **)&carr,  sizeof(int) * SIZE));
    CUDA_CHECK(cudaMemcpy(carr, arr, sizeof(int) * SIZE, cudaMemcpyHostToDevice));

    input<<<256, 512>>>(carr2, carr);
    CUDA_CHECK(cudaGetLastError());   /* catch bad launch configuration */

    /* blocking copy also synchronizes with the kernel */
    CUDA_CHECK(cudaMemcpy(arr2, carr2, sizeof(int) * SIZE, cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(carr2));
    CUDA_CHECK(cudaFree(carr));
    free(arr2);
    free(arr);
    return 0;
}
code for sm_80 Function : _Z5inputPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ BSSY B0, 0x260 ; /* 0x0000022000007945 */ /* 0x000fe40003800000 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.SHL.U32 R6, R6, 0x200, RZ ; /* 0x0000020006067824 */ /* 0x000fc800078e00ff */ /*0070*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x000fe200078e0006 */ /*0080*/ IADD3 R3, R6, 0x7ff, RZ ; /* 0x000007ff06037810 */ /* 0x000fc80007ffe0ff */ /*0090*/ IMNMX R3, R6, R3, !PT ; /* 0x0000000306037217 */ /* 0x000fc80007800200 */ /*00a0*/ IADD3 R0, R3.reuse, 0x1, RZ ; /* 0x0000000103007810 */ /* 0x040fe20007ffe0ff */ /*00b0*/ IMAD.IADD R3, R3, 0x1, -R6 ; /* 0x0000000103037824 */ /* 0x000fc600078e0a06 */ /*00c0*/ LOP3.LUT P1, R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */ /* 0x000fe4000782c0ff */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */ /* 0x000fd60003f06070 */ /*00e0*/ @!P1 BRA 0x250 ; /* 0x0000016000009947 */ /* 0x000fea0003800000 */ /*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*0100*/ IMAD.WIDE R2, R6, R5, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0205 */ /*0110*/ IMAD.WIDE R4, R6, R5, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0205 */ /*0120*/ IMAD.MOV.U32 R9, RZ, RZ, R2 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0002 */ /*0130*/ MOV R7, R4 ; /* 0x0000000400077202 */ /* 0x000fe20000000f00 */ /*0140*/ IMAD.MOV.U32 R10, RZ, RZ, R3 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0003 */ 
/*0150*/ IMAD.MOV.U32 R8, RZ, RZ, R5 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0005 */ /*0160*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0006 */ /*0170*/ IMAD.MOV.U32 R2, RZ, RZ, R7 ; /* 0x000000ffff027224 */ /* 0x001fe400078e0007 */ /*0180*/ IMAD.MOV.U32 R3, RZ, RZ, R8 ; /* 0x000000ffff037224 */ /* 0x000fca00078e0008 */ /*0190*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */ /* 0x0000a2000c1e1900 */ /*01a0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ IADD3 R7, P3, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe40007f7e0ff */ /*01c0*/ ISETP.NE.AND P1, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe40003f25270 */ /*01d0*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */ /* 0x000fe20007ffe0ff */ /*01e0*/ IMAD.X R8, RZ, RZ, R8, P3 ; /* 0x000000ffff087224 */ /* 0x000fe400018e0608 */ /*01f0*/ IMAD.MOV.U32 R2, RZ, RZ, R9 ; /* 0x000000ffff027224 */ /* 0x001fe200078e0009 */ /*0200*/ IADD3 R9, P2, R9, 0x4, RZ ; /* 0x0000000409097810 */ /* 0x000fe20007f5e0ff */ /*0210*/ IMAD.MOV.U32 R3, RZ, RZ, R10 ; /* 0x000000ffff037224 */ /* 0x000fc600078e000a */ /*0220*/ IADD3.X R10, RZ, R10, RZ, P2, !PT ; /* 0x0000000aff0a7210 */ /* 0x000fe400017fe4ff */ /*0230*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0041e2000c101904 */ /*0240*/ @P1 BRA 0x170 ; /* 0xffffff2000001947 */ /* 0x000fea000383ffff */ /*0250*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0260*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0270*/ IADD3 R7, R4, -0x4, RZ ; /* 0xfffffffc04077810 */ /* 0x000fe20007ffe0ff */ /*0280*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x001fe200078e00ff */ /*0290*/ IADD3 R0, R6, 0x7fc, RZ ; /* 0x000007fc06007810 */ /* 0x000fe20007ffe0ff */ /*02a0*/ BSSY B0, 0x620 ; /* 0x0000037000007945 */ /* 0x000fe40003800000 */ /*02b0*/ IMAD.WIDE R4, R4, R5, c[0x2][0x0] ; /* 0x0080000004047625 */ /* 0x000fc800078e0205 */ /*02c0*/ IMAD.IADD R3, R0, 0x1, -R7 ; /* 
0x0000000100037824 */ /* 0x000fe200078e0a07 */ /*02d0*/ IADD3 R2, P0, R4.reuse, c[0x0][0x168], RZ ; /* 0x00005a0004027a10 */ /* 0x040fe40007f1e0ff */ /*02e0*/ IADD3 R4, P2, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fe40007f5e0ff */ /*02f0*/ ISETP.GT.AND P1, PT, R3, 0xc, PT ; /* 0x0000000c0300780c */ /* 0x000fe40003f24270 */ /*0300*/ IADD3.X R3, R5.reuse, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0005037a10 */ /* 0x040fe400007fe4ff */ /*0310*/ IADD3.X R5, R5, c[0x0][0x164], RZ, P2, !PT ; /* 0x0000590005057a10 */ /* 0x000fe400017fe4ff */ /*0320*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fce0003f0f070 */ /*0330*/ @!P1 BRA 0x610 ; /* 0x000002d000009947 */ /* 0x000fea0003800000 */ /*0340*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0350*/ IADD3 R6, R6, 0x7f0, RZ ; /* 0x000007f006067810 */ /* 0x000fe40007ffe0ff */ /*0360*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */ /* 0x000ea8000c1e1900 */ /*0370*/ STG.E [R4.64+-0x8], R9 ; /* 0xfffff80904007986 */ /* 0x0041e8000c101904 */ /*0380*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */ /* 0x000ea8000c1e1900 */ /*0390*/ STG.E [R4.64+-0x4], R11 ; /* 0xfffffc0b04007986 */ /* 0x0043e8000c101904 */ /*03a0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea8000c1e1900 */ /*03b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0045e8000c101904 */ /*03c0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */ /* 0x000ee8000c1e1900 */ /*03d0*/ STG.E [R4.64+0x4], R15 ; /* 0x0000040f04007986 */ /* 0x0087e8000c101904 */ /*03e0*/ LDG.E R17, [R2.64+0x8] ; /* 0x0000080402117981 */ /* 0x000f28000c1e1900 */ /*03f0*/ STG.E [R4.64+0x8], R17 ; /* 0x0000081104007986 */ /* 0x0109e8000c101904 */ /*0400*/ LDG.E R19, [R2.64+0xc] ; /* 0x00000c0402137981 */ /* 0x000f68000c1e1900 */ /*0410*/ STG.E [R4.64+0xc], R19 ; /* 0x00000c1304007986 */ /* 0x020be8000c101904 */ /*0420*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100402097981 */ /* 0x001ea8000c1e1900 
*/ /*0430*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */ /* 0x0041e8000c101904 */ /*0440*/ LDG.E R11, [R2.64+0x14] ; /* 0x00001404020b7981 */ /* 0x002ea8000c1e1900 */ /*0450*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */ /* 0x0043e8000c101904 */ /*0460*/ LDG.E R13, [R2.64+0x18] ; /* 0x00001804020d7981 */ /* 0x000ea8000c1e1900 */ /*0470*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */ /* 0x0045e8000c101904 */ /*0480*/ LDG.E R15, [R2.64+0x1c] ; /* 0x00001c04020f7981 */ /* 0x008ee8000c1e1900 */ /*0490*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */ /* 0x0087e8000c101904 */ /*04a0*/ LDG.E R17, [R2.64+0x20] ; /* 0x0000200402117981 */ /* 0x010f28000c1e1900 */ /*04b0*/ STG.E [R4.64+0x20], R17 ; /* 0x0000201104007986 */ /* 0x0109e8000c101904 */ /*04c0*/ LDG.E R19, [R2.64+0x24] ; /* 0x0000240402137981 */ /* 0x020f68000c1e1900 */ /*04d0*/ STG.E [R4.64+0x24], R19 ; /* 0x0000241304007986 */ /* 0x020fe8000c101904 */ /*04e0*/ LDG.E R9, [R2.64+0x28] ; /* 0x0000280402097981 */ /* 0x001f68000c1e1900 */ /*04f0*/ STG.E [R4.64+0x28], R9 ; /* 0x0000280904007986 */ /* 0x0201e8000c101904 */ /*0500*/ LDG.E R11, [R2.64+0x2c] ; /* 0x00002c04020b7981 */ /* 0x002f68000c1e1900 */ /*0510*/ STG.E [R4.64+0x2c], R11 ; /* 0x00002c0b04007986 */ /* 0x020fe8000c101904 */ /*0520*/ LDG.E R13, [R2.64+0x30] ; /* 0x00003004020d7981 */ /* 0x004ea2000c1e1900 */ /*0530*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fc80007ffe0ff */ /*0540*/ ISETP.GE.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x000fe40003f26270 */ /*0550*/ IADD3 R10, P2, R2, 0x40, RZ ; /* 0x00000040020a7810 */ /* 0x000fe20007f5e0ff */ /*0560*/ STG.E [R4.64+0x30], R13 ; /* 0x0000300d04007986 */ /* 0x004fe8000c101904 */ /*0570*/ LDG.E R15, [R2.64+0x34] ; /* 0x00003404020f7981 */ /* 0x0082a2000c1e1900 */ /*0580*/ IADD3 R8, P3, R4, 0x40, RZ ; /* 0x0000004004087810 */ /* 0x000fe20007f7e0ff */ /*0590*/ IMAD.X R17, RZ, RZ, R3, P2 ; /* 0x000000ffff117224 */ /* 0x010fc800010e0603 */ /*05a0*/ IMAD.X R9, RZ, RZ, 
R5, P3 ; /* 0x000000ffff097224 */ /* 0x001fe200018e0605 */ /*05b0*/ MOV R2, R10 ; /* 0x0000000a00027202 */ /* 0x002fe20000000f00 */ /*05c0*/ IMAD.MOV.U32 R3, RZ, RZ, R17 ; /* 0x000000ffff037224 */ /* 0x000fe200078e0011 */ /*05d0*/ STG.E [R4.64+0x34], R15 ; /* 0x0000340f04007986 */ /* 0x0041e4000c101904 */ /*05e0*/ IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff047224 */ /* 0x001fe400078e0008 */ /*05f0*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */ /* 0x000fe200078e0009 */ /*0600*/ @!P1 BRA 0x360 ; /* 0xfffffd5000009947 */ /* 0x000fea000383ffff */ /*0610*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0620*/ IMAD.IADD R6, R0, 0x1, -R7 ; /* 0x0000000100067824 */ /* 0x000fe200078e0a07 */ /*0630*/ BSSY B0, 0x810 ; /* 0x000001d000007945 */ /* 0x000fe80003800000 */ /*0640*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0650*/ @!P1 BRA 0x800 ; /* 0x000001a000009947 */ /* 0x000fea0003800000 */ /*0660*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */ /* 0x000ea8000c1e1900 */ /*0670*/ STG.E [R4.64+-0x8], R9 ; /* 0xfffff80904007986 */ /* 0x0041e8000c101904 */ /*0680*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */ /* 0x000ea8000c1e1900 */ /*0690*/ STG.E [R4.64+-0x4], R11 ; /* 0xfffffc0b04007986 */ /* 0x0043e8000c101904 */ /*06a0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea8000c1e1900 */ /*06b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0045e8000c101904 */ /*06c0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */ /* 0x000ee8000c1e1900 */ /*06d0*/ STG.E [R4.64+0x4], R15 ; /* 0x0000040f04007986 */ /* 0x0087e8000c101904 */ /*06e0*/ LDG.E R17, [R2.64+0x8] ; /* 0x0000080402117981 */ /* 0x000f28000c1e1900 */ /*06f0*/ STG.E [R4.64+0x8], R17 ; /* 0x0000081104007986 */ /* 0x010fe8000c101904 */ /*0700*/ LDG.E R19, [R2.64+0xc] ; /* 0x00000c0402137981 */ /* 0x000f28000c1e1900 */ /*0710*/ STG.E [R4.64+0xc], R19 ; /* 0x00000c1304007986 */ /* 0x010fe8000c101904 */ /*0720*/ LDG.E R9, [R2.64+0x10] 
; /* 0x0000100402097981 */ /* 0x001f22000c1e1900 */ /*0730*/ IADD3 R8, P1, R2, 0x20, RZ ; /* 0x0000002002087810 */ /* 0x000fc40007f3e0ff */ /*0740*/ IADD3 R6, P2, R4, 0x20, RZ ; /* 0x0000002004067810 */ /* 0x000fe20007f5e0ff */ /*0750*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */ /* 0x010fe8000c101904 */ /*0760*/ LDG.E R11, [R2.64+0x14] ; /* 0x00001404020b7981 */ /* 0x002122000c1e1900 */ /*0770*/ IADD3.X R13, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff0d7210 */ /* 0x004fe200017fe4ff */ /*0780*/ IMAD.X R15, RZ, RZ, R3, P1 ; /* 0x000000ffff0f7224 */ /* 0x008fe200008e0603 */ /*0790*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*07a0*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */ /* 0x000fe20007ffe0ff */ /*07b0*/ IMAD.MOV.U32 R2, RZ, RZ, R8 ; /* 0x000000ffff027224 */ /* 0x001fc400078e0008 */ /*07c0*/ IMAD.MOV.U32 R3, RZ, RZ, R15 ; /* 0x000000ffff037224 */ /* 0x000fe200078e000f */ /*07d0*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */ /* 0x0101e4000c101904 */ /*07e0*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x001fe400078e0006 */ /*07f0*/ IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff057224 */ /* 0x000fe400078e000d */ /*0800*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0810*/ ISETP.LT.OR P0, PT, R7, R0, P0 ; /* 0x000000000700720c */ /* 0x000fda0000701670 */ /*0820*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0830*/ LDG.E R7, [R2.64+-0x8] ; /* 0xfffff80402077981 */ /* 0x000ea8000c1e1900 */ /*0840*/ STG.E [R4.64+-0x8], R7 ; /* 0xfffff80704007986 */ /* 0x004fe8000c101904 */ /*0850*/ LDG.E R9, [R2.64+-0x4] ; /* 0xfffffc0402097981 */ /* 0x000ea8000c1e1900 */ /*0860*/ STG.E [R4.64+-0x4], R9 ; /* 0xfffffc0904007986 */ /* 0x004fe8000c101904 */ /*0870*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea8000c1e1900 */ /*0880*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x004fe8000c101904 */ /*0890*/ LDG.E R13, [R2.64+0x4] ; /* 0x00000404020d7981 */ /* 
0x000ea8000c1e1900 */ /*08a0*/ STG.E [R4.64+0x4], R13 ; /* 0x0000040d04007986 */ /* 0x004fe2000c101904 */ /*08b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024 * 128 * 512)   /* 67,108,864 ints = 256 MB per buffer */

/*
 * Copy kernel: with the <<<256, 512>>> launch in main(), each thread copies
 * one contiguous run of 512 ints (2048 bytes), so the grid exactly tiles
 * SIZE elements with no overlap.
 *
 * BUG FIX: the original used `t = i + 2048`, i.e. 2048 *ints* per thread,
 * while consecutive threads start only 512 ints apart.  That copied every
 * element four times and ran threads 509-511 of the last block up to 1536
 * elements past the end of both buffers (out-of-bounds read and write).
 * 2048 was evidently meant as a byte count; 512 ints is the matching
 * element count.
 */
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * 512 + threadIdx.x * 512;
    int t = i + 512;              /* one 2048-byte chunk per thread */
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

/* Abort with file/line and the runtime's error string if a CUDA call fails;
 * without this, allocation or copy failures are silently ignored. */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main(void)
{
    int *arr;        /* host source buffer */
    int *arr2;       /* host destination, copied back from device */
    int *carr = 0;   /* device source */
    int *carr2 = 0;  /* device destination */

    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (arr == NULL || arr2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    CUDA_CHECK(cudaMalloc((void **)&carr2, sizeof(int) * SIZE));
    CUDA_CHECK(cudaMalloc((void **)&carr,  sizeof(int) * SIZE));
    CUDA_CHECK(cudaMemcpy(carr, arr, sizeof(int) * SIZE, cudaMemcpyHostToDevice));

    input<<<256, 512>>>(carr2, carr);
    CUDA_CHECK(cudaGetLastError());   /* catch bad launch configuration */

    /* blocking copy also synchronizes with the kernel */
    CUDA_CHECK(cudaMemcpy(arr2, carr2, sizeof(int) * SIZE, cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(carr2));
    CUDA_CHECK(cudaFree(carr));
    free(arr2);
    free(arr);
    return 0;
}
.file "tmpxft_00002926_00000000-6_sample.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z5inputPiS_PiS_ .type _Z26__device_stub__Z5inputPiS_PiS_, @function _Z26__device_stub__Z5inputPiS_PiS_: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5inputPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z5inputPiS_PiS_, .-_Z26__device_stub__Z5inputPiS_PiS_ .globl _Z5inputPiS_ .type _Z5inputPiS_, @function _Z5inputPiS_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5inputPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z5inputPiS_, .-_Z5inputPiS_ .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 
.cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movq $0, (%rsp) movq $0, 8(%rsp) movl $268435456, %edi call malloc@PLT movq %rax, %rbx movl $268435456, %edi call malloc@PLT movq %rax, %rbp movl $0, %eax .L12: movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq $67108864, %rax jne .L12 leaq 8(%rsp), %rdi movl $268435456, %esi call cudaMalloc@PLT movq %rsp, %rdi movl $268435456, %esi call cudaMalloc@PLT movl $1, %ecx movl $268435456, %edx movq %rbx, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $512, 28(%rsp) movl $1, 32(%rsp) movl $256, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L17 .L13: movl $2, %ecx movl $268435456, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFree@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L18 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L17: .cfi_restore_state movq (%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z5inputPiS_PiS_ jmp .L13 .L18: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5inputPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq 
_Z5inputPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024 * 128 * 512)   /* 67,108,864 ints = 256 MB per buffer */

/*
 * Copy kernel: with the <<<256, 512>>> launch in main(), each thread copies
 * one contiguous run of 512 ints (2048 bytes), so the grid exactly tiles
 * SIZE elements with no overlap.
 *
 * BUG FIX: the original used `t = i + 2048`, i.e. 2048 *ints* per thread,
 * while consecutive threads start only 512 ints apart.  That copied every
 * element four times and ran threads 509-511 of the last block up to 1536
 * elements past the end of both buffers (out-of-bounds read and write).
 * 2048 was evidently meant as a byte count; 512 ints is the matching
 * element count.
 */
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * 512 + threadIdx.x * 512;
    int t = i + 512;              /* one 2048-byte chunk per thread */
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

/* Abort with file/line and the runtime's error string if a CUDA call fails;
 * without this, allocation or copy failures are silently ignored. */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main(void)
{
    int *arr;        /* host source buffer */
    int *arr2;       /* host destination, copied back from device */
    int *carr = 0;   /* device source */
    int *carr2 = 0;  /* device destination */

    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (arr == NULL || arr2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    CUDA_CHECK(cudaMalloc((void **)&carr2, sizeof(int) * SIZE));
    CUDA_CHECK(cudaMalloc((void **)&carr,  sizeof(int) * SIZE));
    CUDA_CHECK(cudaMemcpy(carr, arr, sizeof(int) * SIZE, cudaMemcpyHostToDevice));

    input<<<256, 512>>>(carr2, carr);
    CUDA_CHECK(cudaGetLastError());   /* catch bad launch configuration */

    /* blocking copy also synchronizes with the kernel */
    CUDA_CHECK(cudaMemcpy(arr2, carr2, sizeof(int) * SIZE, cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(carr2));
    CUDA_CHECK(cudaFree(carr));
    free(arr2);
    free(arr);
    return 0;
}
#include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024 * 128 * 512)   /* 67,108,864 ints = 256 MB per buffer */

/*
 * Copy kernel: with the <<<256, 512>>> launch in main(), each thread copies
 * one contiguous run of 512 ints (2048 bytes), so the grid exactly tiles
 * SIZE elements with no overlap.
 *
 * BUG FIX: the original used `t = i + 2048`, i.e. 2048 *ints* per thread,
 * while consecutive threads start only 512 ints apart.  That copied every
 * element four times and ran threads 509-511 of the last block up to 1536
 * elements past the end of both buffers (out-of-bounds read and write).
 * 2048 was evidently meant as a byte count; 512 ints is the matching
 * element count.
 */
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * 512 + threadIdx.x * 512;
    int t = i + 512;              /* one 2048-byte chunk per thread */
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

/* Abort with file/line and the runtime's error string if a HIP call fails;
 * without this, allocation or copy failures are silently ignored. */
#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess) {                                         \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,  \
                    hipGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main(void)
{
    int *arr;        /* host source buffer */
    int *arr2;       /* host destination, copied back from device */
    int *carr = 0;   /* device source */
    int *carr2 = 0;  /* device destination */

    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (arr == NULL || arr2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    HIP_CHECK(hipMalloc((void **)&carr2, sizeof(int) * SIZE));
    HIP_CHECK(hipMalloc((void **)&carr,  sizeof(int) * SIZE));
    HIP_CHECK(hipMemcpy(carr, arr, sizeof(int) * SIZE, hipMemcpyHostToDevice));

    input<<<256, 512>>>(carr2, carr);
    HIP_CHECK(hipGetLastError());   /* catch bad launch configuration */

    /* blocking copy also synchronizes with the kernel */
    HIP_CHECK(hipMemcpy(arr2, carr2, sizeof(int) * SIZE, hipMemcpyDeviceToHost));

    HIP_CHECK(hipFree(carr2));
    HIP_CHECK(hipFree(carr));
    free(arr2);
    free(arr);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024 * 128 * 512)   /* 67,108,864 ints = 256 MB per buffer */

/*
 * Copy kernel: with the <<<256, 512>>> launch in main(), each thread copies
 * one contiguous run of 512 ints (2048 bytes), so the grid exactly tiles
 * SIZE elements with no overlap.
 *
 * BUG FIX: the original used `t = i + 2048`, i.e. 2048 *ints* per thread,
 * while consecutive threads start only 512 ints apart.  That copied every
 * element four times and ran threads 509-511 of the last block up to 1536
 * elements past the end of both buffers (out-of-bounds read and write).
 * 2048 was evidently meant as a byte count; 512 ints is the matching
 * element count.
 */
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * 512 + threadIdx.x * 512;
    int t = i + 512;              /* one 2048-byte chunk per thread */
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

/* Abort with file/line and the runtime's error string if a HIP call fails;
 * without this, allocation or copy failures are silently ignored. */
#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess) {                                         \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,  \
                    hipGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main(void)
{
    int *arr;        /* host source buffer */
    int *arr2;       /* host destination, copied back from device */
    int *carr = 0;   /* device source */
    int *carr2 = 0;  /* device destination */

    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (arr == NULL || arr2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    HIP_CHECK(hipMalloc((void **)&carr2, sizeof(int) * SIZE));
    HIP_CHECK(hipMalloc((void **)&carr,  sizeof(int) * SIZE));
    HIP_CHECK(hipMemcpy(carr, arr, sizeof(int) * SIZE, hipMemcpyHostToDevice));

    input<<<256, 512>>>(carr2, carr);
    HIP_CHECK(hipGetLastError());   /* catch bad launch configuration */

    /* blocking copy also synchronizes with the kernel */
    HIP_CHECK(hipMemcpy(arr2, carr2, sizeof(int) * SIZE, hipMemcpyDeviceToHost));

    HIP_CHECK(hipFree(carr2));
    HIP_CHECK(hipFree(carr));
    free(arr2);
    free(arr);
    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5inputPiS_ .globl _Z5inputPiS_ .p2align 8 .type _Z5inputPiS_,@function _Z5inputPiS_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 9, v0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s15, s15, s4 s_mov_b32 s4, 0 v_lshl_add_u32 v0, s15, 9, v0 .p2align 6 .LBB0_1: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_add_nc_u32_e32 v1, s4, v0 s_add_i32 s4, s4, 1 s_cmpk_lg_i32 s4, 0x800 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s2, v1 v_add_co_ci_u32_e32 v4, vcc_lo, s3, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_load_b32 v3, v[3:4], off s_waitcnt vmcnt(0) global_store_b32 v[1:2], v3, off s_cbranch_scc1 .LBB0_1 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5inputPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5inputPiS_, .Lfunc_end0-_Z5inputPiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5inputPiS_ 
.private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z5inputPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <cstdio> #include <stdio.h> #include <stdlib.h> #define SIZE 1024*128*512 // int == 4byte // 1GB 256 1kb // 256 1024 1mb // 256 1024 1024 1GB __global__ void input(int *a, int *b) { int i=blockIdx.x*blockDim.x*512 + threadIdx.x*512; int t=i+2048; for(;i<t;i++) { a[i]=b[i]; } } int main(void) { int *arr; int *arr2; int *carr=0; int *carr2=0; arr= (int *)malloc(sizeof(int)*SIZE); arr2= (int *)malloc(sizeof(int)*SIZE); for(int i=0; i<SIZE; i++) { arr[i] = i; } hipMalloc((void**)&carr2,sizeof(int)*SIZE); hipMalloc((void**)&carr,sizeof(int)*SIZE); hipMemcpy(carr,arr,sizeof(int)*SIZE,hipMemcpyHostToDevice); input<<<256,512>>>(carr2,carr); hipMemcpy(arr2,carr2,sizeof(int)*SIZE,hipMemcpyDeviceToHost); hipFree(carr2); hipFree(carr); free(arr2); free(arr); return 0; }
.text .file "sample.hip" .globl _Z20__device_stub__inputPiS_ # -- Begin function _Z20__device_stub__inputPiS_ .p2align 4, 0x90 .type _Z20__device_stub__inputPiS_,@function _Z20__device_stub__inputPiS_: # @_Z20__device_stub__inputPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5inputPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z20__device_stub__inputPiS_, .Lfunc_end0-_Z20__device_stub__inputPiS_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $104, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq $0, 8(%rsp) movq $0, (%rsp) movl $268435456, %edi # imm = 0x10000000 callq malloc movq %rax, %rbx movl $268435456, %edi # imm = 0x10000000 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) incq %rax cmpq $67108864, %rax # imm = 0x4000000 jne .LBB1_1 # %bb.2: movq %rsp, %rdi movl $268435456, %esi # imm = 0x10000000 callq hipMalloc leaq 8(%rsp), %rdi movl $268435456, %esi # imm = 0x10000000 callq hipMalloc movq 8(%rsp), %rdi movl $268435456, %edx # imm = 0x10000000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movabsq $4294967552, %rdi # imm = 0x100000100 leaq 256(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq (%rsp), %rax 
movq 8(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z5inputPiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq (%rsp), %rsi movl $268435456, %edx # imm = 0x10000000 movq %r14, %rdi movl $2, %ecx callq hipMemcpy movq (%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %r14, %rdi callq free movq %rbx, %rdi callq free xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5inputPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq 
__hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z5inputPiS_,@object # @_Z5inputPiS_ .section .rodata,"a",@progbits .globl _Z5inputPiS_ .p2align 3, 0x0 _Z5inputPiS_: .quad _Z20__device_stub__inputPiS_ .size _Z5inputPiS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5inputPiS_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__inputPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5inputPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z5inputPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ BSSY B0, 0x260 ; /* 0x0000022000007945 */ /* 0x000fe40003800000 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.SHL.U32 R6, R6, 0x200, RZ ; /* 0x0000020006067824 */ /* 0x000fc800078e00ff */ /*0070*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x000fe200078e0006 */ /*0080*/ IADD3 R3, R6, 0x7ff, RZ ; /* 0x000007ff06037810 */ /* 0x000fc80007ffe0ff */ /*0090*/ IMNMX R3, R6, R3, !PT ; /* 0x0000000306037217 */ /* 0x000fc80007800200 */ /*00a0*/ IADD3 R0, R3.reuse, 0x1, RZ ; /* 0x0000000103007810 */ /* 0x040fe20007ffe0ff */ /*00b0*/ IMAD.IADD R3, R3, 0x1, -R6 ; /* 0x0000000103037824 */ /* 0x000fc600078e0a06 */ /*00c0*/ LOP3.LUT P1, R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */ /* 0x000fe4000782c0ff */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */ /* 0x000fd60003f06070 */ /*00e0*/ @!P1 BRA 0x250 ; /* 0x0000016000009947 */ /* 0x000fea0003800000 */ /*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fc800078e00ff */ /*0100*/ IMAD.WIDE R2, R6, R5, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0205 */ /*0110*/ IMAD.WIDE R4, R6, R5, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0205 */ /*0120*/ IMAD.MOV.U32 R9, RZ, RZ, R2 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0002 */ /*0130*/ MOV R7, R4 ; /* 0x0000000400077202 */ /* 0x000fe20000000f00 */ /*0140*/ IMAD.MOV.U32 R10, RZ, RZ, R3 ; /* 0x000000ffff0a7224 */ /* 0x000fe400078e0003 */ 
/*0150*/ IMAD.MOV.U32 R8, RZ, RZ, R5 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0005 */ /*0160*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0006 */ /*0170*/ IMAD.MOV.U32 R2, RZ, RZ, R7 ; /* 0x000000ffff027224 */ /* 0x001fe400078e0007 */ /*0180*/ IMAD.MOV.U32 R3, RZ, RZ, R8 ; /* 0x000000ffff037224 */ /* 0x000fca00078e0008 */ /*0190*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */ /* 0x0000a2000c1e1900 */ /*01a0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ IADD3 R7, P3, R7, 0x4, RZ ; /* 0x0000000407077810 */ /* 0x000fe40007f7e0ff */ /*01c0*/ ISETP.NE.AND P1, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe40003f25270 */ /*01d0*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */ /* 0x000fe20007ffe0ff */ /*01e0*/ IMAD.X R8, RZ, RZ, R8, P3 ; /* 0x000000ffff087224 */ /* 0x000fe400018e0608 */ /*01f0*/ IMAD.MOV.U32 R2, RZ, RZ, R9 ; /* 0x000000ffff027224 */ /* 0x001fe200078e0009 */ /*0200*/ IADD3 R9, P2, R9, 0x4, RZ ; /* 0x0000000409097810 */ /* 0x000fe20007f5e0ff */ /*0210*/ IMAD.MOV.U32 R3, RZ, RZ, R10 ; /* 0x000000ffff037224 */ /* 0x000fc600078e000a */ /*0220*/ IADD3.X R10, RZ, R10, RZ, P2, !PT ; /* 0x0000000aff0a7210 */ /* 0x000fe400017fe4ff */ /*0230*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0041e2000c101904 */ /*0240*/ @P1 BRA 0x170 ; /* 0xffffff2000001947 */ /* 0x000fea000383ffff */ /*0250*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0260*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0270*/ IADD3 R7, R4, -0x4, RZ ; /* 0xfffffffc04077810 */ /* 0x000fe20007ffe0ff */ /*0280*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x001fe200078e00ff */ /*0290*/ IADD3 R0, R6, 0x7fc, RZ ; /* 0x000007fc06007810 */ /* 0x000fe20007ffe0ff */ /*02a0*/ BSSY B0, 0x620 ; /* 0x0000037000007945 */ /* 0x000fe40003800000 */ /*02b0*/ IMAD.WIDE R4, R4, R5, c[0x2][0x0] ; /* 0x0080000004047625 */ /* 0x000fc800078e0205 */ /*02c0*/ IMAD.IADD R3, R0, 0x1, -R7 ; /* 
0x0000000100037824 */ /* 0x000fe200078e0a07 */ /*02d0*/ IADD3 R2, P0, R4.reuse, c[0x0][0x168], RZ ; /* 0x00005a0004027a10 */ /* 0x040fe40007f1e0ff */ /*02e0*/ IADD3 R4, P2, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fe40007f5e0ff */ /*02f0*/ ISETP.GT.AND P1, PT, R3, 0xc, PT ; /* 0x0000000c0300780c */ /* 0x000fe40003f24270 */ /*0300*/ IADD3.X R3, R5.reuse, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0005037a10 */ /* 0x040fe400007fe4ff */ /*0310*/ IADD3.X R5, R5, c[0x0][0x164], RZ, P2, !PT ; /* 0x0000590005057a10 */ /* 0x000fe400017fe4ff */ /*0320*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fce0003f0f070 */ /*0330*/ @!P1 BRA 0x610 ; /* 0x000002d000009947 */ /* 0x000fea0003800000 */ /*0340*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0350*/ IADD3 R6, R6, 0x7f0, RZ ; /* 0x000007f006067810 */ /* 0x000fe40007ffe0ff */ /*0360*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */ /* 0x000ea8000c1e1900 */ /*0370*/ STG.E [R4.64+-0x8], R9 ; /* 0xfffff80904007986 */ /* 0x0041e8000c101904 */ /*0380*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */ /* 0x000ea8000c1e1900 */ /*0390*/ STG.E [R4.64+-0x4], R11 ; /* 0xfffffc0b04007986 */ /* 0x0043e8000c101904 */ /*03a0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea8000c1e1900 */ /*03b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0045e8000c101904 */ /*03c0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */ /* 0x000ee8000c1e1900 */ /*03d0*/ STG.E [R4.64+0x4], R15 ; /* 0x0000040f04007986 */ /* 0x0087e8000c101904 */ /*03e0*/ LDG.E R17, [R2.64+0x8] ; /* 0x0000080402117981 */ /* 0x000f28000c1e1900 */ /*03f0*/ STG.E [R4.64+0x8], R17 ; /* 0x0000081104007986 */ /* 0x0109e8000c101904 */ /*0400*/ LDG.E R19, [R2.64+0xc] ; /* 0x00000c0402137981 */ /* 0x000f68000c1e1900 */ /*0410*/ STG.E [R4.64+0xc], R19 ; /* 0x00000c1304007986 */ /* 0x020be8000c101904 */ /*0420*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100402097981 */ /* 0x001ea8000c1e1900 
*/ /*0430*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */ /* 0x0041e8000c101904 */ /*0440*/ LDG.E R11, [R2.64+0x14] ; /* 0x00001404020b7981 */ /* 0x002ea8000c1e1900 */ /*0450*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */ /* 0x0043e8000c101904 */ /*0460*/ LDG.E R13, [R2.64+0x18] ; /* 0x00001804020d7981 */ /* 0x000ea8000c1e1900 */ /*0470*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */ /* 0x0045e8000c101904 */ /*0480*/ LDG.E R15, [R2.64+0x1c] ; /* 0x00001c04020f7981 */ /* 0x008ee8000c1e1900 */ /*0490*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */ /* 0x0087e8000c101904 */ /*04a0*/ LDG.E R17, [R2.64+0x20] ; /* 0x0000200402117981 */ /* 0x010f28000c1e1900 */ /*04b0*/ STG.E [R4.64+0x20], R17 ; /* 0x0000201104007986 */ /* 0x0109e8000c101904 */ /*04c0*/ LDG.E R19, [R2.64+0x24] ; /* 0x0000240402137981 */ /* 0x020f68000c1e1900 */ /*04d0*/ STG.E [R4.64+0x24], R19 ; /* 0x0000241304007986 */ /* 0x020fe8000c101904 */ /*04e0*/ LDG.E R9, [R2.64+0x28] ; /* 0x0000280402097981 */ /* 0x001f68000c1e1900 */ /*04f0*/ STG.E [R4.64+0x28], R9 ; /* 0x0000280904007986 */ /* 0x0201e8000c101904 */ /*0500*/ LDG.E R11, [R2.64+0x2c] ; /* 0x00002c04020b7981 */ /* 0x002f68000c1e1900 */ /*0510*/ STG.E [R4.64+0x2c], R11 ; /* 0x00002c0b04007986 */ /* 0x020fe8000c101904 */ /*0520*/ LDG.E R13, [R2.64+0x30] ; /* 0x00003004020d7981 */ /* 0x004ea2000c1e1900 */ /*0530*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fc80007ffe0ff */ /*0540*/ ISETP.GE.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */ /* 0x000fe40003f26270 */ /*0550*/ IADD3 R10, P2, R2, 0x40, RZ ; /* 0x00000040020a7810 */ /* 0x000fe20007f5e0ff */ /*0560*/ STG.E [R4.64+0x30], R13 ; /* 0x0000300d04007986 */ /* 0x004fe8000c101904 */ /*0570*/ LDG.E R15, [R2.64+0x34] ; /* 0x00003404020f7981 */ /* 0x0082a2000c1e1900 */ /*0580*/ IADD3 R8, P3, R4, 0x40, RZ ; /* 0x0000004004087810 */ /* 0x000fe20007f7e0ff */ /*0590*/ IMAD.X R17, RZ, RZ, R3, P2 ; /* 0x000000ffff117224 */ /* 0x010fc800010e0603 */ /*05a0*/ IMAD.X R9, RZ, RZ, 
R5, P3 ; /* 0x000000ffff097224 */ /* 0x001fe200018e0605 */ /*05b0*/ MOV R2, R10 ; /* 0x0000000a00027202 */ /* 0x002fe20000000f00 */ /*05c0*/ IMAD.MOV.U32 R3, RZ, RZ, R17 ; /* 0x000000ffff037224 */ /* 0x000fe200078e0011 */ /*05d0*/ STG.E [R4.64+0x34], R15 ; /* 0x0000340f04007986 */ /* 0x0041e4000c101904 */ /*05e0*/ IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff047224 */ /* 0x001fe400078e0008 */ /*05f0*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */ /* 0x000fe200078e0009 */ /*0600*/ @!P1 BRA 0x360 ; /* 0xfffffd5000009947 */ /* 0x000fea000383ffff */ /*0610*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0620*/ IMAD.IADD R6, R0, 0x1, -R7 ; /* 0x0000000100067824 */ /* 0x000fe200078e0a07 */ /*0630*/ BSSY B0, 0x810 ; /* 0x000001d000007945 */ /* 0x000fe80003800000 */ /*0640*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0650*/ @!P1 BRA 0x800 ; /* 0x000001a000009947 */ /* 0x000fea0003800000 */ /*0660*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */ /* 0x000ea8000c1e1900 */ /*0670*/ STG.E [R4.64+-0x8], R9 ; /* 0xfffff80904007986 */ /* 0x0041e8000c101904 */ /*0680*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */ /* 0x000ea8000c1e1900 */ /*0690*/ STG.E [R4.64+-0x4], R11 ; /* 0xfffffc0b04007986 */ /* 0x0043e8000c101904 */ /*06a0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea8000c1e1900 */ /*06b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0045e8000c101904 */ /*06c0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */ /* 0x000ee8000c1e1900 */ /*06d0*/ STG.E [R4.64+0x4], R15 ; /* 0x0000040f04007986 */ /* 0x0087e8000c101904 */ /*06e0*/ LDG.E R17, [R2.64+0x8] ; /* 0x0000080402117981 */ /* 0x000f28000c1e1900 */ /*06f0*/ STG.E [R4.64+0x8], R17 ; /* 0x0000081104007986 */ /* 0x010fe8000c101904 */ /*0700*/ LDG.E R19, [R2.64+0xc] ; /* 0x00000c0402137981 */ /* 0x000f28000c1e1900 */ /*0710*/ STG.E [R4.64+0xc], R19 ; /* 0x00000c1304007986 */ /* 0x010fe8000c101904 */ /*0720*/ LDG.E R9, [R2.64+0x10] 
; /* 0x0000100402097981 */ /* 0x001f22000c1e1900 */ /*0730*/ IADD3 R8, P1, R2, 0x20, RZ ; /* 0x0000002002087810 */ /* 0x000fc40007f3e0ff */ /*0740*/ IADD3 R6, P2, R4, 0x20, RZ ; /* 0x0000002004067810 */ /* 0x000fe20007f5e0ff */ /*0750*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */ /* 0x010fe8000c101904 */ /*0760*/ LDG.E R11, [R2.64+0x14] ; /* 0x00001404020b7981 */ /* 0x002122000c1e1900 */ /*0770*/ IADD3.X R13, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff0d7210 */ /* 0x004fe200017fe4ff */ /*0780*/ IMAD.X R15, RZ, RZ, R3, P1 ; /* 0x000000ffff0f7224 */ /* 0x008fe200008e0603 */ /*0790*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*07a0*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */ /* 0x000fe20007ffe0ff */ /*07b0*/ IMAD.MOV.U32 R2, RZ, RZ, R8 ; /* 0x000000ffff027224 */ /* 0x001fc400078e0008 */ /*07c0*/ IMAD.MOV.U32 R3, RZ, RZ, R15 ; /* 0x000000ffff037224 */ /* 0x000fe200078e000f */ /*07d0*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */ /* 0x0101e4000c101904 */ /*07e0*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */ /* 0x001fe400078e0006 */ /*07f0*/ IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff057224 */ /* 0x000fe400078e000d */ /*0800*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0810*/ ISETP.LT.OR P0, PT, R7, R0, P0 ; /* 0x000000000700720c */ /* 0x000fda0000701670 */ /*0820*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0830*/ LDG.E R7, [R2.64+-0x8] ; /* 0xfffff80402077981 */ /* 0x000ea8000c1e1900 */ /*0840*/ STG.E [R4.64+-0x8], R7 ; /* 0xfffff80704007986 */ /* 0x004fe8000c101904 */ /*0850*/ LDG.E R9, [R2.64+-0x4] ; /* 0xfffffc0402097981 */ /* 0x000ea8000c1e1900 */ /*0860*/ STG.E [R4.64+-0x4], R9 ; /* 0xfffffc0904007986 */ /* 0x004fe8000c101904 */ /*0870*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea8000c1e1900 */ /*0880*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x004fe8000c101904 */ /*0890*/ LDG.E R13, [R2.64+0x4] ; /* 0x00000404020d7981 */ /* 
0x000ea8000c1e1900 */ /*08a0*/ STG.E [R4.64+0x4], R13 ; /* 0x0000040d04007986 */ /* 0x004fe2000c101904 */ /*08b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5inputPiS_ .globl _Z5inputPiS_ .p2align 8 .type _Z5inputPiS_,@function _Z5inputPiS_: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 9, v0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s15, s15, s4 s_mov_b32 s4, 0 v_lshl_add_u32 v0, s15, 9, v0 .p2align 6 .LBB0_1: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_add_nc_u32_e32 v1, s4, v0 s_add_i32 s4, s4, 1 s_cmpk_lg_i32 s4, 0x800 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s2, v1 v_add_co_ci_u32_e32 v4, vcc_lo, s3, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_load_b32 v3, v[3:4], off s_waitcnt vmcnt(0) global_store_b32 v[1:2], v3, off s_cbranch_scc1 .LBB0_1 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5inputPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5inputPiS_, .Lfunc_end0-_Z5inputPiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5inputPiS_ 
.private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z5inputPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00002926_00000000-6_sample.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z5inputPiS_PiS_ .type _Z26__device_stub__Z5inputPiS_PiS_, @function _Z26__device_stub__Z5inputPiS_PiS_: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5inputPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z5inputPiS_PiS_, .-_Z26__device_stub__Z5inputPiS_PiS_ .globl _Z5inputPiS_ .type _Z5inputPiS_, @function _Z5inputPiS_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5inputPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z5inputPiS_, .-_Z5inputPiS_ .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 
.cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movq $0, (%rsp) movq $0, 8(%rsp) movl $268435456, %edi call malloc@PLT movq %rax, %rbx movl $268435456, %edi call malloc@PLT movq %rax, %rbp movl $0, %eax .L12: movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq $67108864, %rax jne .L12 leaq 8(%rsp), %rdi movl $268435456, %esi call cudaMalloc@PLT movq %rsp, %rdi movl $268435456, %esi call cudaMalloc@PLT movl $1, %ecx movl $268435456, %edx movq %rbx, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $512, 28(%rsp) movl $1, 32(%rsp) movl $256, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L17 .L13: movl $2, %ecx movl $268435456, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFree@PLT movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L18 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L17: .cfi_restore_state movq (%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z5inputPiS_PiS_ jmp .L13 .L18: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5inputPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq 
_Z5inputPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "sample.hip" .globl _Z20__device_stub__inputPiS_ # -- Begin function _Z20__device_stub__inputPiS_ .p2align 4, 0x90 .type _Z20__device_stub__inputPiS_,@function _Z20__device_stub__inputPiS_: # @_Z20__device_stub__inputPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5inputPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z20__device_stub__inputPiS_, .Lfunc_end0-_Z20__device_stub__inputPiS_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $104, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq $0, 8(%rsp) movq $0, (%rsp) movl $268435456, %edi # imm = 0x10000000 callq malloc movq %rax, %rbx movl $268435456, %edi # imm = 0x10000000 callq malloc movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) incq %rax cmpq $67108864, %rax # imm = 0x4000000 jne .LBB1_1 # %bb.2: movq %rsp, %rdi movl $268435456, %esi # imm = 0x10000000 callq hipMalloc leaq 8(%rsp), %rdi movl $268435456, %esi # imm = 0x10000000 callq hipMalloc movq 8(%rsp), %rdi movl $268435456, %edx # imm = 0x10000000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movabsq $4294967552, %rdi # imm = 0x100000100 leaq 256(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq (%rsp), %rax 
movq 8(%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z5inputPiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq (%rsp), %rsi movl $268435456, %edx # imm = 0x10000000 movq %r14, %rdi movl $2, %ecx callq hipMemcpy movq (%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %r14, %rdi callq free movq %rbx, %rdi callq free xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5inputPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq 
__hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z5inputPiS_,@object # @_Z5inputPiS_ .section .rodata,"a",@progbits .globl _Z5inputPiS_ .p2align 3, 0x0 _Z5inputPiS_: .quad _Z20__device_stub__inputPiS_ .size _Z5inputPiS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5inputPiS_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__inputPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5inputPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) { for (int i=0; i < var_1; ++i) { if (comp >= cosf((-1.2614E-37f + +1.0591E35f))) { float tmp_1 = +1.4818E-37f; float tmp_2 = (-1.7030E-5f + logf((-1.1908E36f + tanhf(-0.0f)))); comp = tmp_2 / tmp_1 - coshf((var_3 + floorf(var_4 / asinf((var_5 - var_6 + var_7))))); comp += fmodf((+1.1795E-8f / (var_8 - (+1.6709E-36f - (-1.4408E1f + var_9)))), -0.0f - var_10); for (int i=0; i < var_2; ++i) { comp = (var_12 * var_13 - +1.3634E34f - +1.5654E34f); var_11[i] = +1.7711E-37f; float tmp_3 = (var_14 + var_15); comp = tmp_3 + var_11[i] - var_16 / -1.9434E-13f + acosf(var_17 / (var_18 / (-0.0f + -1.7130E-12f + sinhf((var_19 - var_20 * var_21 - (var_22 / var_23)))))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = 
atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24); cudaDeviceSynchronize(); return 0; }
.file "tmpxft_000a9d3e_00000000-6_test.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11initPointerf .type _Z11initPointerf, @function _Z11initPointerf: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movd %xmm0, %ebx movl $40, %edi call malloc@PLT movq %rax, %rdx leaq 40(%rax), %rcx .L4: movl %ebx, (%rdx) addq $4, %rdx cmpq %rcx, %rdx jne .L4 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z11initPointerf, .-_Z11initPointerf .globl _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff .type _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff, @function _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff: .LFB2083: .cfi_startproc endbr64 subq $328, %rsp .cfi_def_cfa_offset 336 movss %xmm0, 44(%rsp) movl %edi, 40(%rsp) movl %esi, 36(%rsp) movss %xmm1, 32(%rsp) movss %xmm2, 28(%rsp) movss %xmm3, 24(%rsp) movss %xmm4, 20(%rsp) movss %xmm5, 16(%rsp) movss %xmm6, 12(%rsp) movss %xmm7, 8(%rsp) movq %rdx, (%rsp) movq %fs:40, %rax movq %rax, 312(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 28(%rsp), %rax movq %rax, 144(%rsp) leaq 24(%rsp), %rax movq %rax, 152(%rsp) leaq 20(%rsp), %rax movq %rax, 160(%rsp) leaq 16(%rsp), %rax movq %rax, 168(%rsp) leaq 12(%rsp), %rax movq %rax, 176(%rsp) leaq 8(%rsp), %rax movq %rax, 184(%rsp) leaq 336(%rsp), %rax movq %rax, 192(%rsp) movq %rsp, %rax movq %rax, 200(%rsp) leaq 344(%rsp), %rax movq %rax, 
208(%rsp) leaq 352(%rsp), %rax movq %rax, 216(%rsp) leaq 360(%rsp), %rax movq %rax, 224(%rsp) leaq 368(%rsp), %rax movq %rax, 232(%rsp) leaq 376(%rsp), %rax movq %rax, 240(%rsp) leaq 384(%rsp), %rax movq %rax, 248(%rsp) leaq 392(%rsp), %rax movq %rax, 256(%rsp) leaq 400(%rsp), %rax movq %rax, 264(%rsp) leaq 408(%rsp), %rax movq %rax, 272(%rsp) leaq 416(%rsp), %rax movq %rax, 280(%rsp) leaq 424(%rsp), %rax movq %rax, 288(%rsp) leaq 432(%rsp), %rax movq %rax, 296(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 312(%rsp), %rax subq %fs:40, %rax jne .L12 addq $328, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 344 pushq 56(%rsp) .cfi_def_cfa_offset 352 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7computefiiffffffffPfffffffffffff(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 336 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff, .-_Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff .globl _Z7computefiiffffffffPfffffffffffff .type _Z7computefiiffffffffPfffffffffffff, @function _Z7computefiiffffffffPfffffffffffff: .LFB2084: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movss 224(%rsp), %xmm8 movss %xmm8, 96(%rsp) movss 216(%rsp), %xmm8 movss %xmm8, 88(%rsp) movss 208(%rsp), %xmm8 movss %xmm8, 80(%rsp) movss 200(%rsp), %xmm8 movss %xmm8, 72(%rsp) movss 192(%rsp), %xmm8 movss %xmm8, 64(%rsp) movss 184(%rsp), %xmm8 movss %xmm8, 56(%rsp) movss 176(%rsp), %xmm8 movss %xmm8, 48(%rsp) movss 168(%rsp), %xmm8 movss %xmm8, 40(%rsp) movss 160(%rsp), %xmm8 movss %xmm8, 32(%rsp) 
movss 152(%rsp), %xmm8 movss %xmm8, 24(%rsp) movss 144(%rsp), %xmm8 movss %xmm8, 16(%rsp) movss 136(%rsp), %xmm8 movss %xmm8, 8(%rsp) movss 128(%rsp), %xmm8 movss %xmm8, (%rsp) call _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff addq $120, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z7computefiiffffffffPfffffffffffff, .-_Z7computefiiffffffffPfffffffffffff .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $216, %rsp .cfi_def_cfa_offset 256 movq %rsi, %rbx movq 8(%rsi), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 168(%rsp) movq 16(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbp movq 24(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r12 movq 32(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 160(%rsp) movq 40(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 152(%rsp) movq 48(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 144(%rsp) movq 56(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 136(%rsp) movq 64(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 128(%rsp) movq 72(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 120(%rsp) movq 80(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 112(%rsp) movq 88(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 104(%rsp) movq 96(%rbx), %rdi movl $0, %esi call strtod@PLT cvtsd2ss %xmm0, %xmm0 call _Z11initPointerf movq %rax, %r13 movq 104(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 96(%rsp) movq 112(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 88(%rsp) movq 120(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 80(%rsp) movq 128(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 72(%rsp) movq 
136(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 64(%rsp) movq 144(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 56(%rsp) movq 152(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 48(%rsp) movq 160(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 40(%rsp) movq 168(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 32(%rsp) movq 176(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 24(%rsp) movq 184(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 16(%rsp) movq 192(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 8(%rsp) movl $1, 196(%rsp) movl $1, 200(%rsp) movl $1, 184(%rsp) movl $1, 188(%rsp) movl $0, %r9d movl $0, %r8d movq 196(%rsp), %rdx movl $1, %ecx movq 184(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L16: call cudaDeviceSynchronize@PLT movl $0, %eax addq $216, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pxor %xmm0, %xmm0 cvtsd2ss 168(%rsp), %xmm0 subq $112, %rsp .cfi_def_cfa_offset 368 pxor %xmm1, %xmm1 cvtsd2ss 120(%rsp), %xmm1 movss %xmm1, 96(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 128(%rsp), %xmm1 movss %xmm1, 88(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 136(%rsp), %xmm1 movss %xmm1, 80(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 144(%rsp), %xmm1 movss %xmm1, 72(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 152(%rsp), %xmm1 movss %xmm1, 64(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 160(%rsp), %xmm1 movss %xmm1, 56(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 168(%rsp), %xmm1 movss %xmm1, 48(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 176(%rsp), %xmm1 movss %xmm1, 40(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 184(%rsp), %xmm1 movss %xmm1, 32(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 192(%rsp), %xmm1 movss %xmm1, 24(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 200(%rsp), %xmm1 movss %xmm1, 16(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 208(%rsp), %xmm1 movss %xmm1, 8(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 216(%rsp), 
%xmm1 movss %xmm1, (%rsp) movq %r13, %rdx pxor %xmm7, %xmm7 cvtsd2ss 224(%rsp), %xmm7 pxor %xmm6, %xmm6 cvtsd2ss 232(%rsp), %xmm6 pxor %xmm5, %xmm5 cvtsd2ss 240(%rsp), %xmm5 pxor %xmm4, %xmm4 cvtsd2ss 248(%rsp), %xmm4 pxor %xmm3, %xmm3 cvtsd2ss 256(%rsp), %xmm3 pxor %xmm2, %xmm2 cvtsd2ss 264(%rsp), %xmm2 pxor %xmm1, %xmm1 cvtsd2ss 272(%rsp), %xmm1 movl %r12d, %esi movl %ebp, %edi call _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff addq $112, %rsp .cfi_def_cfa_offset 256 jmp .L16 .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z7computefiiffffffffPfffffffffffff" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z7computefiiffffffffPfffffffffffff(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 
.long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) { for (int i=0; i < var_1; ++i) { if (comp >= cosf((-1.2614E-37f + +1.0591E35f))) { float tmp_1 = +1.4818E-37f; float tmp_2 = (-1.7030E-5f + logf((-1.1908E36f + tanhf(-0.0f)))); comp = tmp_2 / tmp_1 - coshf((var_3 + floorf(var_4 / asinf((var_5 - var_6 + var_7))))); comp += fmodf((+1.1795E-8f / (var_8 - (+1.6709E-36f - (-1.4408E1f + var_9)))), -0.0f - var_10); for (int i=0; i < var_2; ++i) { comp = (var_12 * var_13 - +1.3634E34f - +1.5654E34f); var_11[i] = +1.7711E-37f; float tmp_3 = (var_14 + var_15); comp = tmp_3 + var_11[i] - var_16 / -1.9434E-13f + acosf(var_17 / (var_18 / (-0.0f + -1.7130E-12f + sinhf((var_19 - var_20 * var_21 - (var_22 / var_23)))))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = 
atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24); cudaDeviceSynchronize(); return 0; }
/* This is a automatically generated test. Do not modify */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) { for (int i=0; i < var_1; ++i) { if (comp >= cosf((-1.2614E-37f + +1.0591E35f))) { float tmp_1 = +1.4818E-37f; float tmp_2 = (-1.7030E-5f + logf((-1.1908E36f + tanhf(-0.0f)))); comp = tmp_2 / tmp_1 - coshf((var_3 + floorf(var_4 / asinf((var_5 - var_6 + var_7))))); comp += fmodf((+1.1795E-8f / (var_8 - (+1.6709E-36f - (-1.4408E1f + var_9)))), -0.0f - var_10); for (int i=0; i < var_2; ++i) { comp = (var_12 * var_13 - +1.3634E34f - +1.5654E34f); var_11[i] = +1.7711E-37f; float tmp_3 = (var_14 + var_15); comp = tmp_3 + var_11[i] - var_16 / -1.9434E-13f + acosf(var_17 / (var_18 / (-0.0f + -1.7130E-12f + sinhf((var_19 - var_20 * var_21 - (var_22 / var_23)))))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = 
atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24); hipDeviceSynchronize(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* This is a automatically generated test. Do not modify */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) { for (int i=0; i < var_1; ++i) { if (comp >= cosf((-1.2614E-37f + +1.0591E35f))) { float tmp_1 = +1.4818E-37f; float tmp_2 = (-1.7030E-5f + logf((-1.1908E36f + tanhf(-0.0f)))); comp = tmp_2 / tmp_1 - coshf((var_3 + floorf(var_4 / asinf((var_5 - var_6 + var_7))))); comp += fmodf((+1.1795E-8f / (var_8 - (+1.6709E-36f - (-1.4408E1f + var_9)))), -0.0f - var_10); for (int i=0; i < var_2; ++i) { comp = (var_12 * var_13 - +1.3634E34f - +1.5654E34f); var_11[i] = +1.7711E-37f; float tmp_3 = (var_14 + var_15); comp = tmp_3 + var_11[i] - var_16 / -1.9434E-13f + acosf(var_17 / (var_18 / (-0.0f + -1.7130E-12f + sinhf((var_19 - var_20 * var_21 - (var_22 / var_23)))))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = 
atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24); hipDeviceSynchronize(); return 0; }
.text .file "test.hip" .globl _Z22__device_stub__computefiiffffffffPfffffffffffff # -- Begin function _Z22__device_stub__computefiiffffffffPfffffffffffff .p2align 4, 0x90 .type _Z22__device_stub__computefiiffffffffPfffffffffffff,@function _Z22__device_stub__computefiiffffffffPfffffffffffff: # @_Z22__device_stub__computefiiffffffffPfffffffffffff .cfi_startproc # %bb.0: subq $296, %rsp # imm = 0x128 .cfi_def_cfa_offset 304 movss %xmm0, 36(%rsp) movl %edi, 32(%rsp) movl %esi, 28(%rsp) movss %xmm1, 24(%rsp) movss %xmm2, 20(%rsp) movss %xmm3, 16(%rsp) movss %xmm4, 12(%rsp) movss %xmm5, 8(%rsp) movss %xmm6, 4(%rsp) movss %xmm7, (%rsp) movq %rdx, 88(%rsp) leaq 36(%rsp), %rax movq %rax, 96(%rsp) leaq 32(%rsp), %rax movq %rax, 104(%rsp) leaq 28(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) leaq 20(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 4(%rsp), %rax movq %rax, 160(%rsp) movq %rsp, %rax movq %rax, 168(%rsp) leaq 304(%rsp), %rax movq %rax, 176(%rsp) leaq 88(%rsp), %rax movq %rax, 184(%rsp) leaq 312(%rsp), %rax movq %rax, 192(%rsp) leaq 320(%rsp), %rax movq %rax, 200(%rsp) leaq 328(%rsp), %rax movq %rax, 208(%rsp) leaq 336(%rsp), %rax movq %rax, 216(%rsp) leaq 344(%rsp), %rax movq %rax, 224(%rsp) leaq 352(%rsp), %rax movq %rax, 232(%rsp) leaq 360(%rsp), %rax movq %rax, 240(%rsp) leaq 368(%rsp), %rax movq %rax, 248(%rsp) leaq 376(%rsp), %rax movq %rax, 256(%rsp) leaq 384(%rsp), %rax movq %rax, 264(%rsp) leaq 392(%rsp), %rax movq %rax, 272(%rsp) leaq 400(%rsp), %rax movq %rax, 280(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7computefiiffffffffPfffffffffffff, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $312, %rsp # imm = 0x138 .cfi_adjust_cfa_offset -312 retq .Lfunc_end0: .size _Z22__device_stub__computefiiffffffffPfffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiiffffffffPfffffffffffff .cfi_endproc # -- End function .globl _Z11initPointerf # -- Begin function _Z11initPointerf .p2align 4, 0x90 .type _Z11initPointerf,@function _Z11initPointerf: # @_Z11initPointerf .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movss %xmm0, 4(%rsp) # 4-byte Spill movl $40, %edi callq malloc movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movss %xmm0, (%rax,%rcx,4) incq %rcx cmpq $10, %rcx jne .LBB1_1 # %bb.2: popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $272, %rsp # imm = 0x110 .cfi_def_cfa_offset 320 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rsi, %r15 movq 8(%rsi), %rdi xorl %r13d, %r13d xorl %esi, %esi callq strtod movsd %xmm0, 264(%rsp) # 8-byte Spill movq 16(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx movq 24(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 32(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 256(%rsp) # 8-byte Spill movq 40(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 248(%rsp) # 8-byte Spill movq 48(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 136(%rsp) # 8-byte Spill movq 56(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 128(%rsp) # 8-byte Spill movq 64(%r15), %rdi xorl %esi, %esi 
callq strtod movsd %xmm0, 120(%rsp) # 8-byte Spill movq 72(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 112(%rsp) # 8-byte Spill movq 80(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 240(%rsp) # 8-byte Spill movq 88(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 232(%rsp) # 8-byte Spill movq 96(%r15), %rdi xorl %esi, %esi callq strtod cvtsd2ss %xmm0, %xmm0 movss %xmm0, 104(%rsp) # 4-byte Spill movl $40, %edi callq malloc movss 104(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero movq %rax, %r12 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movss %xmm0, (%r12,%r13,4) incq %r13 cmpq $10, %r13 jne .LBB2_1 # %bb.2: # %_Z11initPointerf.exit movq 104(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 104(%rsp) # 8-byte Spill movq 112(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 224(%rsp) # 8-byte Spill movq 120(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 216(%rsp) # 8-byte Spill movq 128(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 208(%rsp) # 8-byte Spill movq 136(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 200(%rsp) # 8-byte Spill movq 144(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 192(%rsp) # 8-byte Spill movq 152(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 184(%rsp) # 8-byte Spill movq 160(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 176(%rsp) # 8-byte Spill movq 168(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 168(%rsp) # 8-byte Spill movq 176(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 160(%rsp) # 8-byte Spill movq 184(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 152(%rsp) # 8-byte Spill movq 192(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 144(%rsp) # 8-byte Spill movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movsd 144(%rsp), %xmm0 # 8-byte Reload # xmm0 = 
mem[0],zero cvtsd2ss %xmm0, %xmm8 movsd 152(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm9 movsd 160(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm10 movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm11 movsd 176(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm12 movsd 184(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm13 movsd 192(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm14 movsd 200(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm15 movsd 208(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm3 movsd 216(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm4 movsd 224(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm5 movsd 104(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm6 movsd 232(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm7 movsd 240(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 104(%rsp) # 4-byte Spill movsd 112(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 112(%rsp) # 4-byte Spill movsd 120(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 120(%rsp) # 4-byte Spill movsd 128(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 128(%rsp) # 4-byte Spill movsd 136(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 136(%rsp) # 4-byte Spill movsd 248(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm2 movsd 256(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm1 movsd 264(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm8, 96(%rsp) movss %xmm9, 88(%rsp) movss %xmm10, 80(%rsp) movss %xmm11, 72(%rsp) movss %xmm12, 64(%rsp) movss %xmm13, 
56(%rsp) movss %xmm14, 48(%rsp) movss %xmm15, 40(%rsp) movss %xmm3, 32(%rsp) movss %xmm4, 24(%rsp) movss %xmm5, 16(%rsp) movss %xmm6, 8(%rsp) movss %xmm7, (%rsp) movl %ebx, %edi movl %r14d, %esi movss 136(%rsp), %xmm3 # 4-byte Reload # xmm3 = mem[0],zero,zero,zero movss 128(%rsp), %xmm4 # 4-byte Reload # xmm4 = mem[0],zero,zero,zero movss 120(%rsp), %xmm5 # 4-byte Reload # xmm5 = mem[0],zero,zero,zero movss 112(%rsp), %xmm6 # 4-byte Reload # xmm6 = mem[0],zero,zero,zero movss 104(%rsp), %xmm7 # 4-byte Reload # xmm7 = mem[0],zero,zero,zero movq %r12, %rdx callq _Z22__device_stub__computefiiffffffffPfffffffffffff .LBB2_4: callq hipDeviceSynchronize xorl %eax, %eax addq $272, %rsp # imm = 0x110 .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7computefiiffffffffPfffffffffffff, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: 
pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z7computefiiffffffffPfffffffffffff,@object # @_Z7computefiiffffffffPfffffffffffff .section .rodata,"a",@progbits .globl _Z7computefiiffffffffPfffffffffffff .p2align 3, 0x0 _Z7computefiiffffffffPfffffffffffff: .quad _Z22__device_stub__computefiiffffffffPfffffffffffff .size _Z7computefiiffffffffPfffffffffffff, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7computefiiffffffffPfffffffffffff" .size .L__unnamed_1, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__computefiiffffffffPfffffffffffff .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7computefiiffffffffPfffffffffffff .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000a9d3e_00000000-6_test.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11initPointerf .type _Z11initPointerf, @function _Z11initPointerf: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movd %xmm0, %ebx movl $40, %edi call malloc@PLT movq %rax, %rdx leaq 40(%rax), %rcx .L4: movl %ebx, (%rdx) addq $4, %rdx cmpq %rcx, %rdx jne .L4 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z11initPointerf, .-_Z11initPointerf .globl _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff .type _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff, @function _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff: .LFB2083: .cfi_startproc endbr64 subq $328, %rsp .cfi_def_cfa_offset 336 movss %xmm0, 44(%rsp) movl %edi, 40(%rsp) movl %esi, 36(%rsp) movss %xmm1, 32(%rsp) movss %xmm2, 28(%rsp) movss %xmm3, 24(%rsp) movss %xmm4, 20(%rsp) movss %xmm5, 16(%rsp) movss %xmm6, 12(%rsp) movss %xmm7, 8(%rsp) movq %rdx, (%rsp) movq %fs:40, %rax movq %rax, 312(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 28(%rsp), %rax movq %rax, 144(%rsp) leaq 24(%rsp), %rax movq %rax, 152(%rsp) leaq 20(%rsp), %rax movq %rax, 160(%rsp) leaq 16(%rsp), %rax movq %rax, 168(%rsp) leaq 12(%rsp), %rax movq %rax, 176(%rsp) leaq 8(%rsp), %rax movq %rax, 184(%rsp) leaq 336(%rsp), %rax movq %rax, 192(%rsp) movq %rsp, %rax movq %rax, 200(%rsp) leaq 344(%rsp), %rax movq %rax, 
208(%rsp) leaq 352(%rsp), %rax movq %rax, 216(%rsp) leaq 360(%rsp), %rax movq %rax, 224(%rsp) leaq 368(%rsp), %rax movq %rax, 232(%rsp) leaq 376(%rsp), %rax movq %rax, 240(%rsp) leaq 384(%rsp), %rax movq %rax, 248(%rsp) leaq 392(%rsp), %rax movq %rax, 256(%rsp) leaq 400(%rsp), %rax movq %rax, 264(%rsp) leaq 408(%rsp), %rax movq %rax, 272(%rsp) leaq 416(%rsp), %rax movq %rax, 280(%rsp) leaq 424(%rsp), %rax movq %rax, 288(%rsp) leaq 432(%rsp), %rax movq %rax, 296(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 312(%rsp), %rax subq %fs:40, %rax jne .L12 addq $328, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 344 pushq 56(%rsp) .cfi_def_cfa_offset 352 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7computefiiffffffffPfffffffffffff(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 336 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff, .-_Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff .globl _Z7computefiiffffffffPfffffffffffff .type _Z7computefiiffffffffPfffffffffffff, @function _Z7computefiiffffffffPfffffffffffff: .LFB2084: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movss 224(%rsp), %xmm8 movss %xmm8, 96(%rsp) movss 216(%rsp), %xmm8 movss %xmm8, 88(%rsp) movss 208(%rsp), %xmm8 movss %xmm8, 80(%rsp) movss 200(%rsp), %xmm8 movss %xmm8, 72(%rsp) movss 192(%rsp), %xmm8 movss %xmm8, 64(%rsp) movss 184(%rsp), %xmm8 movss %xmm8, 56(%rsp) movss 176(%rsp), %xmm8 movss %xmm8, 48(%rsp) movss 168(%rsp), %xmm8 movss %xmm8, 40(%rsp) movss 160(%rsp), %xmm8 movss %xmm8, 32(%rsp) 
movss 152(%rsp), %xmm8 movss %xmm8, 24(%rsp) movss 144(%rsp), %xmm8 movss %xmm8, 16(%rsp) movss 136(%rsp), %xmm8 movss %xmm8, 8(%rsp) movss 128(%rsp), %xmm8 movss %xmm8, (%rsp) call _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff addq $120, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z7computefiiffffffffPfffffffffffff, .-_Z7computefiiffffffffPfffffffffffff .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $216, %rsp .cfi_def_cfa_offset 256 movq %rsi, %rbx movq 8(%rsi), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 168(%rsp) movq 16(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbp movq 24(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r12 movq 32(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 160(%rsp) movq 40(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 152(%rsp) movq 48(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 144(%rsp) movq 56(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 136(%rsp) movq 64(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 128(%rsp) movq 72(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 120(%rsp) movq 80(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 112(%rsp) movq 88(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 104(%rsp) movq 96(%rbx), %rdi movl $0, %esi call strtod@PLT cvtsd2ss %xmm0, %xmm0 call _Z11initPointerf movq %rax, %r13 movq 104(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 96(%rsp) movq 112(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 88(%rsp) movq 120(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 80(%rsp) movq 128(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 72(%rsp) movq 
136(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 64(%rsp) movq 144(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 56(%rsp) movq 152(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 48(%rsp) movq 160(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 40(%rsp) movq 168(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 32(%rsp) movq 176(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 24(%rsp) movq 184(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 16(%rsp) movq 192(%rbx), %rdi movl $0, %esi call strtod@PLT movsd %xmm0, 8(%rsp) movl $1, 196(%rsp) movl $1, 200(%rsp) movl $1, 184(%rsp) movl $1, 188(%rsp) movl $0, %r9d movl $0, %r8d movq 196(%rsp), %rdx movl $1, %ecx movq 184(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L16: call cudaDeviceSynchronize@PLT movl $0, %eax addq $216, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pxor %xmm0, %xmm0 cvtsd2ss 168(%rsp), %xmm0 subq $112, %rsp .cfi_def_cfa_offset 368 pxor %xmm1, %xmm1 cvtsd2ss 120(%rsp), %xmm1 movss %xmm1, 96(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 128(%rsp), %xmm1 movss %xmm1, 88(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 136(%rsp), %xmm1 movss %xmm1, 80(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 144(%rsp), %xmm1 movss %xmm1, 72(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 152(%rsp), %xmm1 movss %xmm1, 64(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 160(%rsp), %xmm1 movss %xmm1, 56(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 168(%rsp), %xmm1 movss %xmm1, 48(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 176(%rsp), %xmm1 movss %xmm1, 40(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 184(%rsp), %xmm1 movss %xmm1, 32(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 192(%rsp), %xmm1 movss %xmm1, 24(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 200(%rsp), %xmm1 movss %xmm1, 16(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 208(%rsp), %xmm1 movss %xmm1, 8(%rsp) pxor %xmm1, %xmm1 cvtsd2ss 216(%rsp), 
%xmm1 movss %xmm1, (%rsp) movq %r13, %rdx pxor %xmm7, %xmm7 cvtsd2ss 224(%rsp), %xmm7 pxor %xmm6, %xmm6 cvtsd2ss 232(%rsp), %xmm6 pxor %xmm5, %xmm5 cvtsd2ss 240(%rsp), %xmm5 pxor %xmm4, %xmm4 cvtsd2ss 248(%rsp), %xmm4 pxor %xmm3, %xmm3 cvtsd2ss 256(%rsp), %xmm3 pxor %xmm2, %xmm2 cvtsd2ss 264(%rsp), %xmm2 pxor %xmm1, %xmm1 cvtsd2ss 272(%rsp), %xmm1 movl %r12d, %esi movl %ebp, %edi call _Z49__device_stub__Z7computefiiffffffffPffffffffffffffiiffffffffPfffffffffffff addq $112, %rsp .cfi_def_cfa_offset 256 jmp .L16 .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z7computefiiffffffffPfffffffffffff" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z7computefiiffffffffPfffffffffffff(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 
.long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "test.hip" .globl _Z22__device_stub__computefiiffffffffPfffffffffffff # -- Begin function _Z22__device_stub__computefiiffffffffPfffffffffffff .p2align 4, 0x90 .type _Z22__device_stub__computefiiffffffffPfffffffffffff,@function _Z22__device_stub__computefiiffffffffPfffffffffffff: # @_Z22__device_stub__computefiiffffffffPfffffffffffff .cfi_startproc # %bb.0: subq $296, %rsp # imm = 0x128 .cfi_def_cfa_offset 304 movss %xmm0, 36(%rsp) movl %edi, 32(%rsp) movl %esi, 28(%rsp) movss %xmm1, 24(%rsp) movss %xmm2, 20(%rsp) movss %xmm3, 16(%rsp) movss %xmm4, 12(%rsp) movss %xmm5, 8(%rsp) movss %xmm6, 4(%rsp) movss %xmm7, (%rsp) movq %rdx, 88(%rsp) leaq 36(%rsp), %rax movq %rax, 96(%rsp) leaq 32(%rsp), %rax movq %rax, 104(%rsp) leaq 28(%rsp), %rax movq %rax, 112(%rsp) leaq 24(%rsp), %rax movq %rax, 120(%rsp) leaq 20(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 4(%rsp), %rax movq %rax, 160(%rsp) movq %rsp, %rax movq %rax, 168(%rsp) leaq 304(%rsp), %rax movq %rax, 176(%rsp) leaq 88(%rsp), %rax movq %rax, 184(%rsp) leaq 312(%rsp), %rax movq %rax, 192(%rsp) leaq 320(%rsp), %rax movq %rax, 200(%rsp) leaq 328(%rsp), %rax movq %rax, 208(%rsp) leaq 336(%rsp), %rax movq %rax, 216(%rsp) leaq 344(%rsp), %rax movq %rax, 224(%rsp) leaq 352(%rsp), %rax movq %rax, 232(%rsp) leaq 360(%rsp), %rax movq %rax, 240(%rsp) leaq 368(%rsp), %rax movq %rax, 248(%rsp) leaq 376(%rsp), %rax movq %rax, 256(%rsp) leaq 384(%rsp), %rax movq %rax, 264(%rsp) leaq 392(%rsp), %rax movq %rax, 272(%rsp) leaq 400(%rsp), %rax movq %rax, 280(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7computefiiffffffffPfffffffffffff, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $312, %rsp # imm = 0x138 .cfi_adjust_cfa_offset -312 retq .Lfunc_end0: .size _Z22__device_stub__computefiiffffffffPfffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiiffffffffPfffffffffffff .cfi_endproc # -- End function .globl _Z11initPointerf # -- Begin function _Z11initPointerf .p2align 4, 0x90 .type _Z11initPointerf,@function _Z11initPointerf: # @_Z11initPointerf .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movss %xmm0, 4(%rsp) # 4-byte Spill movl $40, %edi callq malloc movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero xorl %ecx, %ecx .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movss %xmm0, (%rax,%rcx,4) incq %rcx cmpq $10, %rcx jne .LBB1_1 # %bb.2: popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $272, %rsp # imm = 0x110 .cfi_def_cfa_offset 320 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rsi, %r15 movq 8(%rsi), %rdi xorl %r13d, %r13d xorl %esi, %esi callq strtod movsd %xmm0, 264(%rsp) # 8-byte Spill movq 16(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx movq 24(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 32(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 256(%rsp) # 8-byte Spill movq 40(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 248(%rsp) # 8-byte Spill movq 48(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 136(%rsp) # 8-byte Spill movq 56(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 128(%rsp) # 8-byte Spill movq 64(%r15), %rdi xorl %esi, %esi 
callq strtod movsd %xmm0, 120(%rsp) # 8-byte Spill movq 72(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 112(%rsp) # 8-byte Spill movq 80(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 240(%rsp) # 8-byte Spill movq 88(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 232(%rsp) # 8-byte Spill movq 96(%r15), %rdi xorl %esi, %esi callq strtod cvtsd2ss %xmm0, %xmm0 movss %xmm0, 104(%rsp) # 4-byte Spill movl $40, %edi callq malloc movss 104(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero movq %rax, %r12 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movss %xmm0, (%r12,%r13,4) incq %r13 cmpq $10, %r13 jne .LBB2_1 # %bb.2: # %_Z11initPointerf.exit movq 104(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 104(%rsp) # 8-byte Spill movq 112(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 224(%rsp) # 8-byte Spill movq 120(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 216(%rsp) # 8-byte Spill movq 128(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 208(%rsp) # 8-byte Spill movq 136(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 200(%rsp) # 8-byte Spill movq 144(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 192(%rsp) # 8-byte Spill movq 152(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 184(%rsp) # 8-byte Spill movq 160(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 176(%rsp) # 8-byte Spill movq 168(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 168(%rsp) # 8-byte Spill movq 176(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 160(%rsp) # 8-byte Spill movq 184(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 152(%rsp) # 8-byte Spill movq 192(%r15), %rdi xorl %esi, %esi callq strtod movsd %xmm0, 144(%rsp) # 8-byte Spill movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movsd 144(%rsp), %xmm0 # 8-byte Reload # xmm0 = 
mem[0],zero cvtsd2ss %xmm0, %xmm8 movsd 152(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm9 movsd 160(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm10 movsd 168(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm11 movsd 176(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm12 movsd 184(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm13 movsd 192(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm14 movsd 200(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm15 movsd 208(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm3 movsd 216(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm4 movsd 224(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm5 movsd 104(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm6 movsd 232(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm7 movsd 240(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 104(%rsp) # 4-byte Spill movsd 112(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 112(%rsp) # 4-byte Spill movsd 120(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 120(%rsp) # 4-byte Spill movsd 128(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 128(%rsp) # 4-byte Spill movsd 136(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, 136(%rsp) # 4-byte Spill movsd 248(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm2 movsd 256(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm1 movsd 264(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm8, 96(%rsp) movss %xmm9, 88(%rsp) movss %xmm10, 80(%rsp) movss %xmm11, 72(%rsp) movss %xmm12, 64(%rsp) movss %xmm13, 
56(%rsp) movss %xmm14, 48(%rsp) movss %xmm15, 40(%rsp) movss %xmm3, 32(%rsp) movss %xmm4, 24(%rsp) movss %xmm5, 16(%rsp) movss %xmm6, 8(%rsp) movss %xmm7, (%rsp) movl %ebx, %edi movl %r14d, %esi movss 136(%rsp), %xmm3 # 4-byte Reload # xmm3 = mem[0],zero,zero,zero movss 128(%rsp), %xmm4 # 4-byte Reload # xmm4 = mem[0],zero,zero,zero movss 120(%rsp), %xmm5 # 4-byte Reload # xmm5 = mem[0],zero,zero,zero movss 112(%rsp), %xmm6 # 4-byte Reload # xmm6 = mem[0],zero,zero,zero movss 104(%rsp), %xmm7 # 4-byte Reload # xmm7 = mem[0],zero,zero,zero movq %r12, %rdx callq _Z22__device_stub__computefiiffffffffPfffffffffffff .LBB2_4: callq hipDeviceSynchronize xorl %eax, %eax addq $272, %rsp # imm = 0x110 .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7computefiiffffffffPfffffffffffff, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: 
pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z7computefiiffffffffPfffffffffffff,@object # @_Z7computefiiffffffffPfffffffffffff .section .rodata,"a",@progbits .globl _Z7computefiiffffffffPfffffffffffff .p2align 3, 0x0 _Z7computefiiffffffffPfffffffffffff: .quad _Z22__device_stub__computefiiffffffffPfffffffffffff .size _Z7computefiiffffffffPfffffffffffff, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7computefiiffffffffPfffffffffffff" .size .L__unnamed_1, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__computefiiffffffffPfffffffffffff .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7computefiiffffffffPfffffffffffff .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// // Created by luchin on 29-07-21. // #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, sizeof(char) * LARGO * 
LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// // Created by luchin on 29-07-21. // #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, sizeof(char) * LARGO * 
LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
.file "tmpxft_0000d3e1_00000000-6_swe_cuda.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3671: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3671: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3694: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3694: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// // Created by luchin on 29-07-21. // #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, sizeof(char) * LARGO * 
LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
// // Created by luchin on 29-07-21. // #include <hip/hip_runtime.h> #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, 
sizeof(char) * LARGO * LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// // Created by luchin on 29-07-21. // #include <hip/hip_runtime.h> #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, 
sizeof(char) * LARGO * LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// // Created by luchin on 29-07-21. // #include <hip/hip_runtime.h> #include <cassert> #include <iostream> //static cudaError_t checkCuda(cudaError_t result) { // if (result != cudaSuccess) { // fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); // assert(result == cudaSuccess); // } // return result; //} // //Par_CUDA::Par_CUDA() : AbstractGoL() { // int devId = 0; // cudaDeviceProp prop; // checkCuda(cudaGetDeviceProperties(&prop, devId)); // checkCuda(cudaSetDevice(devId)); // int total_size = sizeof(char) * LARGO * LARGO; // d_grid = nullptr; // checkCuda(cudaMalloc(&d_grid, total_size)); //} // //__global__ void step(char *grid) { //#ifdef CUDA_USE_2D // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; //#else // int tmp = blockIdx.x * blockDim.x + threadIdx.x; // int x = tmp / LARGO; // int y = tmp % LARGO; //#endif // // // contamos los vecinos // // printf("x is %d and y is %d\n", x, y); // if (x > LARGO || y > LARGO) return; // x += LARGO; // nos aseguramos de que x-1 sea positivo // y += LARGO; // int x_m = (x - 1) % LARGO; // int x_p = (x + 1) % LARGO; // int y_m = (y - 1) % LARGO; // int y_p = (y + 1) % LARGO; // x = x % LARGO; // y = y % LARGO; // int num_neighbors = // grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] + // grid[x_m * LARGO + y] + grid[x_p * LARGO + y] + // grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p]; // char alive = grid[x * LARGO + y]; // // __syncthreads(); // // reemplazamos los lugares donde corresponde // if ((alive && num_neighbors == 2) || num_neighbors == 3) { // grid[x * LARGO + y] = 1; // } else { // grid[x * LARGO + y] = 0; // } //} // // //void Par_CUDA::run_game(int num_steps) { //#ifdef CUDA_USE_2D // dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1); // dim3 dimBlock(8, 8, 1); //#else // dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1); // dim3 dimBlock(8, 1, 1); //#endif // cudaMemcpy(d_grid, h_grid, 
sizeof(char) * LARGO * LARGO, cudaMemcpyHostToDevice); // for (int i = 0; i < num_steps; i++) { // step<<<dimGrid, dimBlock>>>(d_grid); // } // cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost); //}
.text .file "swe_cuda.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0000d3e1_00000000-6_swe_cuda.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3671: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3671: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3694: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3694: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "swe_cuda.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } cudaMalloc(&device_a, (N*N)*sizeof(int)); cudaMalloc(&device_b, (N*N)*sizeof(int)); cudaMalloc(&device_c, (N*N)*sizeof(int)); cudaMemcpy(device_a, host_a, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(device_b, host_b, (N*N)*sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = duration_cast<microseconds>(stop - 
start).count(); cout << endl << " GPU time: " << gpu_time << endl; cudaMemcpy(host_c, device_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
code for sm_80 Function : _Z14gpu_matrix_mulPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002600 */ /*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e280000002200 */ /*0030*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R9, R9, c[0x0][0x4], R0 ; /* 0x0000010009097a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GT.AND P0, PT, R9, 0x3ff, PT ; /* 0x000003ff0900780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R2, c[0x0][0x0], R3 ; /* 0x0000000002007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3ff, P0 ; /* 0x000003ff0000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ SHF.L.U32 R9, R9, 0xa, RZ ; /* 0x0000000a09097819 */ /* 0x000fe200000006ff */ /*00b0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */ /* 0x000fe200000001ff */ /*00c0*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x000fe20000000f00 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00e0*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*00f0*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */ /* 0x000fe20000000a00 */ /*0100*/ MOV R12, RZ ; /* 0x000000ff000c7202 */ /* 0x000fe40000000f00 */ /*0110*/ IADD3 R11, R9, 0x1, RZ ; /* 0x00000001090b7810 */ /* 0x000fe40007ffe0ff */ /*0120*/ MOV R6, UR6 ; /* 0x0000000600067c02 */ /* 0x000fe20008000f00 */ /*0130*/ IMAD.WIDE R14, R9, 0x4, R4 ; /* 0x00000004090e7825 */ /* 0x000fe200078e0204 */ /*0140*/ MOV R7, UR7 ; /* 0x0000000700077c02 */ /* 0x000fc60008000f00 */ /*0150*/ IMAD.WIDE R2, R11, 0x4, R4 ; /* 0x000000040b027825 */ /* 0x000fe400078e0204 */ 
/*0160*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x0000a4000c1e1900 */ /*0170*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */ /* 0x000fe400078e0206 */ /*0180*/ LDG.E R24, [R2.64] ; /* 0x0000000402187981 */ /* 0x000ee8000c1e1900 */ /*0190*/ LDG.E R17, [R6.64] ; /* 0x0000000406117981 */ /* 0x000ea8000c1e1900 */ /*01a0*/ LDG.E R27, [R6.64+0x1000] ; /* 0x00100004061b7981 */ /* 0x000ee8000c1e1900 */ /*01b0*/ LDG.E R16, [R2.64+0x4] ; /* 0x0000040402107981 */ /* 0x000f28000c1e1900 */ /*01c0*/ LDG.E R19, [R6.64+0x2000] ; /* 0x0020000406137981 */ /* 0x000f28000c1e1900 */ /*01d0*/ LDG.E R23, [R2.64+0x8] ; /* 0x0000080402177981 */ /* 0x000f68000c1e1900 */ /*01e0*/ LDG.E R20, [R6.64+0x3000] ; /* 0x0030000406147981 */ /* 0x000f68000c1e1900 */ /*01f0*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */ /* 0x000f68000c1e1900 */ /*0200*/ LDG.E R25, [R6.64+0x4000] ; /* 0x0040000406197981 */ /* 0x000f68000c1e1900 */ /*0210*/ LDG.E R18, [R2.64+0x10] ; /* 0x0000100402127981 */ /* 0x000f68000c1e1900 */ /*0220*/ LDG.E R21, [R6.64+0x5000] ; /* 0x0050000406157981 */ /* 0x000f68000c1e1900 */ /*0230*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */ /* 0x000f68000c1e1900 */ /*0240*/ LDG.E R13, [R6.64+0x6000] ; /* 0x00600004060d7981 */ /* 0x000f68000c1e1900 */ /*0250*/ LDG.E R15, [R6.64+0x7000] ; /* 0x00700004060f7981 */ /* 0x001f68000c1e1900 */ /*0260*/ LDG.E R26, [R6.64+0xf000] ; /* 0x00f00004061a7981 */ /* 0x000f68000c1e1900 */ /*0270*/ LDG.E R29, [R2.64+0x38] ; /* 0x00003804021d7981 */ /* 0x000f62000c1e1900 */ /*0280*/ IMAD R14, R17, R14, R12 ; /* 0x0000000e110e7224 */ /* 0x004fc600078e020c */ /*0290*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000ea2000c1e1900 */ /*02a0*/ IMAD R24, R27, R24, R14 ; /* 0x000000181b187224 */ /* 0x008fc600078e020e */ /*02b0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */ /* 0x000ee8000c1e1900 */ /*02c0*/ LDG.E R17, [R6.64+0x8000] ; /* 0x0080000406117981 */ /* 0x000ee2000c1e1900 */ /*02d0*/ IMAD R24, R19, R16, R24 ; /* 
0x0000001013187224 */ /* 0x010fc600078e0218 */ /*02e0*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */ /* 0x000f28000c1e1900 */ /*02f0*/ LDG.E R19, [R6.64+0x9000] ; /* 0x0090000406137981 */ /* 0x000f22000c1e1900 */ /*0300*/ IMAD R24, R20, R23, R24 ; /* 0x0000001714187224 */ /* 0x020fc600078e0218 */ /*0310*/ LDG.E R23, [R2.64+0x24] ; /* 0x0000240402177981 */ /* 0x000f68000c1e1900 */ /*0320*/ LDG.E R20, [R6.64+0xa000] ; /* 0x00a0000406147981 */ /* 0x000f62000c1e1900 */ /*0330*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */ /* 0x000fc600078e0218 */ /*0340*/ LDG.E R25, [R2.64+0x28] ; /* 0x0000280402197981 */ /* 0x000f68000c1e1900 */ /*0350*/ LDG.E R22, [R6.64+0xb000] ; /* 0x00b0000406167981 */ /* 0x000f62000c1e1900 */ /*0360*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */ /* 0x000fc600078e0218 */ /*0370*/ LDG.E R21, [R2.64+0x2c] ; /* 0x00002c0402157981 */ /* 0x000f68000c1e1900 */ /*0380*/ LDG.E R18, [R6.64+0xc000] ; /* 0x00c0000406127981 */ /* 0x000f62000c1e1900 */ /*0390*/ IMAD R28, R13, R10, R24 ; /* 0x0000000a0d1c7224 */ /* 0x000fc600078e0218 */ /*03a0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */ /* 0x000f68000c1e1900 */ /*03b0*/ LDG.E R13, [R6.64+0xd000] ; /* 0x00d00004060d7981 */ /* 0x000f68000c1e1900 */ /*03c0*/ LDG.E R10, [R6.64+0xe000] ; /* 0x00e00004060a7981 */ /* 0x000f68000c1e1900 */ /*03d0*/ LDG.E R27, [R2.64+0x34] ; /* 0x00003404021b7981 */ /* 0x000f62000c1e1900 */ /*03e0*/ IADD3 R8, R8, 0x10, RZ ; /* 0x0000001008087810 */ /* 0x000fc80007ffe0ff */ /*03f0*/ ISETP.NE.AND P0, PT, R8, 0x400, PT ; /* 0x000004000800780c */ /* 0x000fe20003f05270 */ /*0400*/ UIADD3 UR6, UP0, UR6, 0x10000, URZ ; /* 0x0001000006067890 */ /* 0x000fe2000ff1e03f */ /*0410*/ IADD3 R4, P1, R4, 0x40, RZ ; /* 0x0000004004047810 */ /* 0x000fc60007f3e0ff */ /*0420*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0430*/ IADD3.X R5, RZ, R5, RZ, P1, !PT ; /* 0x00000005ff057210 */ /* 0x000fe20000ffe4ff */ /*0440*/ IMAD R12, R15, 
R12, R28 ; /* 0x0000000c0f0c7224 */ /* 0x004fc800078e021c */ /*0450*/ IMAD R12, R17, R14, R12 ; /* 0x0000000e110c7224 */ /* 0x008fc800078e020c */ /*0460*/ IMAD R12, R19, R16, R12 ; /* 0x00000010130c7224 */ /* 0x010fc800078e020c */ /*0470*/ IMAD R12, R20, R23, R12 ; /* 0x00000017140c7224 */ /* 0x020fc800078e020c */ /*0480*/ IMAD R12, R22, R25, R12 ; /* 0x00000019160c7224 */ /* 0x000fc800078e020c */ /*0490*/ IMAD R12, R18, R21, R12 ; /* 0x00000015120c7224 */ /* 0x000fc800078e020c */ /*04a0*/ IMAD R12, R13, R24, R12 ; /* 0x000000180d0c7224 */ /* 0x000fc800078e020c */ /*04b0*/ IMAD R12, R10, R27, R12 ; /* 0x0000001b0a0c7224 */ /* 0x000fc800078e020c */ /*04c0*/ IMAD R12, R26, R29, R12 ; /* 0x0000001d1a0c7224 */ /* 0x000fe200078e020c */ /*04d0*/ @P0 BRA 0x120 ; /* 0xfffffc4000000947 */ /* 0x000fea000383ffff */ /*04e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*04f0*/ IADD3 R2, R0, R9, RZ ; /* 0x0000000900027210 */ /* 0x000fd20007ffe0ff */ /*0500*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0203 */ /*0510*/ STG.E [R2.64], R12 ; /* 0x0000000c02007986 */ /* 0x000fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ 
/* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } cudaMalloc(&device_a, (N*N)*sizeof(int)); cudaMalloc(&device_b, (N*N)*sizeof(int)); cudaMalloc(&device_c, (N*N)*sizeof(int)); cudaMemcpy(device_a, host_a, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(device_b, host_b, (N*N)*sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = duration_cast<microseconds>(stop - 
start).count(); cout << endl << " GPU time: " << gpu_time << endl; cudaMemcpy(host_c, device_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
.file "tmpxft_0003d238_00000000-6_matrixMultiplication.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4137: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4137: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " CPU exec time: " .text .globl _Z14cpu_matrix_mulPA1024_iS0_S0_ .type _Z14cpu_matrix_mulPA1024_iS0_S0_, @function _Z14cpu_matrix_mulPA1024_iS0_S0_: .LFB4131: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $8, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r13 call _ZNSt6chrono3_V212system_clock3nowEv@PLT movq %rax, %r12 movl $0, %r8d leaq 4198400(%rbp), %r9 jmp .L4 .L17: addq $4, %rsi addq $4, %rdi cmpq %rdi, %r9 je .L6 .L8: leaq (%rbx,%r8), %rcx leaq -4194304(%rdi), %rax .L5: movl (%rcx), %edx imull (%rax), %edx addl %edx, (%rsi) addq $4, %rcx addq $4096, %rax cmpq %rax, %rdi jne .L5 jmp .L17 .L6: addq $4096, %r8 cmpq $4194304, %r8 je .L7 .L4: leaq 0(%r13,%r8), %rsi leaq 4194304(%rbp), %rdi jmp .L8 .L7: call _ZNSt6chrono3_V212system_clock3nowEv@PLT subq %r12, %rax movq %rax, %rcx movabsq $2361183241434822607, %rdx imulq %rdx sarq $7, %rdx sarq $63, %rcx movq %rdx, %rbx subq %rcx, %rbx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbp testq %rbp, %rbp je .L18 cmpb $0, 56(%rbp) je .L10 movzbl 67(%rbp), %eax .L11: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq %rax, %rbp movl $16, %edx leaq 
.LC0(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbx, %rsi movq %rbp, %rdi call _ZNSo9_M_insertIlEERSoT_@PLT movq %rax, %rbp movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbx testq %rbx, %rbx je .L19 cmpb $0, 56(%rbx) je .L13 movzbl 67(%rbx), %eax .L14: movsbl %al, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state call _ZSt16__throw_bad_castv@PLT .L10: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) jmp .L11 .L19: call _ZSt16__throw_bad_castv@PLT .L13: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L14 .cfi_endproc .LFE4131: .size _Z14cpu_matrix_mulPA1024_iS0_S0_, .-_Z14cpu_matrix_mulPA1024_iS0_S0_ .globl _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ .type _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_, @function _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_: .LFB4159: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L24 .L20: movq 120(%rsp), %rax subq %fs:40, %rax jne .L25 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14gpu_matrix_mulPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L20 .L25: call __stack_chk_fail@PLT .cfi_endproc .LFE4159: .size _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_, .-_Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ .globl _Z14gpu_matrix_mulPiS_S_ .type _Z14gpu_matrix_mulPiS_S_, @function _Z14gpu_matrix_mulPiS_S_: .LFB4160: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4160: .size _Z14gpu_matrix_mulPiS_S_, .-_Z14gpu_matrix_mulPiS_S_ .section .rodata.str1.1 .LC1: .string " GPU time: " .LC2: .string " " .LC3: .string "FAILED" .LC4: .string "PASSED" .text .globl main .type main, @function main: .LFB4134: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 leaq -12582912(%rsp), %r11 .cfi_def_cfa 11, 12582944 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $64, %rsp .cfi_def_cfa_offset 12583008 movq %fs:40, %rax movq %rax, 12582968(%rsp) xorl %eax, %eax movl $4096, %ebp .L29: leaq -4096(%rbp), %rbx .L30: call rand@PLT movslq %eax, %rdx imulq $-542367883, %rdx, %rdx shrq $32, %rdx addl %eax, %edx sarl $8, %edx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $293, %edx, %edx subl %edx, %eax movl %eax, 48(%rsp,%rbx) call rand@PLT movslq %eax, %rdx imulq $1041204193, %rdx, %rdx sarq $36, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $66, %edx, %edx subl %edx, %eax movl %eax, 4194352(%rsp,%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L30 addq $4096, %rbp cmpq $4198400, %rbp jne .L29 leaq 8388656(%rsp), %rdx leaq 4194352(%rsp), %rsi leaq 
48(%rsp), %rdi call _Z14cpu_matrix_mulPA1024_iS0_S0_ movl $4194304, %edi call malloc@PLT movq %rax, %rbp movl $4194304, %edi call malloc@PLT movq %rax, %rbx movl $4194304, %edi call malloc@PLT movq %rax, %r12 movl $4096, %ecx .L32: leaq -4096(%rcx), %rax .L33: movl 48(%rsp,%rax), %edx movl %edx, 0(%rbp,%rax) movl 4194352(%rsp,%rax), %edx movl %edx, (%rbx,%rax) addq $4, %rax cmpq %rcx, %rax jne .L33 addq $4096, %rcx cmpq $4198400, %rcx jne .L32 movq %rsp, %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 8(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT movl $1, %ecx movl $4194304, %edx movq %rbp, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $4194304, %edx movq %rbx, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $64, 24(%rsp) movl $64, 28(%rsp) movl $1, 32(%rsp) movl $16, 36(%rsp) movl $16, 40(%rsp) movl $1, 44(%rsp) call _ZNSt6chrono3_V212system_clock3nowEv@PLT movq %rax, %rbx movl 44(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movq 24(%rsp), %rdi movl 32(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L35: call _ZNSt6chrono3_V212system_clock3nowEv@PLT subq %rbx, %rax movq %rax, %rcx movabsq $2361183241434822607, %rdx imulq %rdx sarq $7, %rdx sarq $63, %rcx movq %rdx, %rbx subq %rcx, %rbx leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC1(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movq %rbx, %rsi call _ZNSo9_M_insertIlEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $2, %ecx movl $4194304, %edx movq 16(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl (%r12), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi leaq .LC2(%rip), %rbx movq %rbx, %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl 8388656(%rsp), 
%esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl 4(%r12), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi movq %rbx, %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl 8388660(%rsp), %esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %r12, %rdx leaq 8388656(%rsp), %rcx addq $4194304, %r12 .L36: movl $0, %eax .L39: movl (%rcx,%rax), %esi cmpl %esi, (%rdx,%rax) jne .L47 addq $4, %rax cmpq $4096, %rax jne .L39 addq $4096, %rdx addq $4096, %rcx cmpq %r12, %rdx jne .L36 leaq _ZSt4cout(%rip), %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC4(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $0, %eax jmp .L28 .L46: movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ jmp .L35 .L47: leaq _ZSt4cout(%rip), %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC3(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $-1, %eax .L28: movq 12582968(%rsp), %rdx subq %fs:40, %rdx jne .L48 addq $12582976, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L48: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE4134: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z14gpu_matrix_mulPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4162: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, 
%rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z14gpu_matrix_mulPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4162: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } cudaMalloc(&device_a, (N*N)*sizeof(int)); cudaMalloc(&device_b, (N*N)*sizeof(int)); cudaMalloc(&device_c, (N*N)*sizeof(int)); cudaMemcpy(device_a, host_a, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(device_b, host_b, (N*N)*sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = duration_cast<microseconds>(stop - 
start).count(); cout << endl << " GPU time: " << gpu_time << endl; cudaMemcpy(host_c, device_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
#include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } hipMalloc(&device_a, (N*N)*sizeof(int)); hipMalloc(&device_b, (N*N)*sizeof(int)); hipMalloc(&device_c, (N*N)*sizeof(int)); hipMemcpy(device_a, host_a, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(device_b, host_b, (N*N)*sizeof(int), hipMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = 
duration_cast<microseconds>(stop - start).count(); cout << endl << " GPU time: " << gpu_time << endl; hipMemcpy(host_c, device_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } hipMalloc(&device_a, (N*N)*sizeof(int)); hipMalloc(&device_b, (N*N)*sizeof(int)); hipMalloc(&device_c, (N*N)*sizeof(int)); hipMemcpy(device_a, host_a, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(device_b, host_b, (N*N)*sizeof(int), hipMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = 
duration_cast<microseconds>(stop - start).count(); cout << endl << " GPU time: " << gpu_time << endl; hipMemcpy(host_c, device_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14gpu_matrix_mulPiS_S_ .globl _Z14gpu_matrix_mulPiS_S_ .p2align 8 .type _Z14gpu_matrix_mulPiS_S_,@function _Z14gpu_matrix_mulPiS_S_: s_load_b32 s2, s[0:1], 0x24 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v3, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4] s_mov_b32 s2, exec_lo v_max_i32_e32 v2, v0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0x400, v2 s_cbranch_execz .LBB0_4 s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 10, v0 v_mov_b32_e32 v4, 0 s_mov_b64 s[2:3], 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v7, vcc_lo, s4, v2 v_add_co_ci_u32_e32 v8, vcc_lo, s5, v3, vcc_lo v_mov_b32_e32 v2, v1 .p2align 6 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_ashrrev_i32_e32 v3, 31, v2 v_add_co_u32 v5, vcc_lo, v7, s2 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s3, v8, vcc_lo v_lshlrev_b64 v[9:10], 2, v[2:3] v_add_nc_u32_e32 v2, 0x400, v2 s_add_u32 s2, s2, 4 s_addc_u32 s3, s3, 0 s_cmpk_eq_i32 s2, 0x1000 s_delay_alu instid0(VALU_DEP_2) v_add_co_u32 v9, vcc_lo, s6, v9 v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo global_load_b32 v3, v[5:6], off global_load_b32 v9, v[9:10], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[5:6], null, v9, v3, v[4:5] s_delay_alu instid0(VALU_DEP_1) v_mov_b32_e32 v4, v5 s_cbranch_scc0 .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x10 v_lshl_add_u32 v0, v0, 10, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v5, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14gpu_matrix_mulPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14gpu_matrix_mulPiS_S_, .Lfunc_end0-_Z14gpu_matrix_mulPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14gpu_matrix_mulPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14gpu_matrix_mulPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <cmath> #include <chrono> using namespace std; using namespace std::chrono; #define BLOCK_SIZE 16 #define N 1024 __global__ void gpu_matrix_mul(int *a, int *b, int *c){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < N && row < N){ for(int i = 0;i < N; i++){ sum += a[row*N + i] * b[i*N + col]; } c[row*N + col] = sum; } } void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){ auto start = high_resolution_clock::now(); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ for(int k = 0;k < N;k ++){ C[i][j] += A[i][k]*B[k][j]; } } } auto stop = high_resolution_clock::now(); auto cpu_time = duration_cast<microseconds>(stop - start).count(); cout << endl << " CPU exec time: " << cpu_time << endl; } int main(){ //CPU duration count int CPU_A[N][N], CPU_B[N][N], CPU_C[N][N]; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ CPU_A[i][j] = rand()%293; CPU_B[i][j] = rand()%66; } } cpu_matrix_mul(CPU_A, CPU_B, CPU_C); //GPU duration count int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c; host_a = (int *)malloc((N*N) * sizeof(int)); host_b = (int *)malloc((N*N) * sizeof(int)); host_c = (int *)malloc((N*N) * sizeof(int)); for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ host_a[i * N + j] = CPU_A[i][j]; host_b[i * N + j] = CPU_B[i][j]; } } hipMalloc(&device_a, (N*N)*sizeof(int)); hipMalloc(&device_b, (N*N)*sizeof(int)); hipMalloc(&device_c, (N*N)*sizeof(int)); hipMemcpy(device_a, host_a, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(device_b, host_b, (N*N)*sizeof(int), hipMemcpyHostToDevice); dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); auto start = high_resolution_clock::now(); gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c); auto stop = high_resolution_clock::now(); auto gpu_time = 
duration_cast<microseconds>(stop - start).count(); cout << endl << " GPU time: " << gpu_time << endl; hipMemcpy(host_c, device_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost); //Verify cout << host_c[0] << " " << CPU_C[0][0] << endl; cout << host_c[1] << " " << CPU_C[0][1] << endl; for(int i = 0;i < N;i ++){ for(int j = 0;j < N;j ++){ if(host_c[i * N + j] != CPU_C[i][j]){ cout << endl << "FAILED" << endl; return -1; } } } cout << endl << "PASSED" << endl; }
.text .file "matrixMultiplication.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z29__device_stub__gpu_matrix_mulPiS_S_ # -- Begin function _Z29__device_stub__gpu_matrix_mulPiS_S_ .p2align 4, 0x90 .type _Z29__device_stub__gpu_matrix_mulPiS_S_,@function _Z29__device_stub__gpu_matrix_mulPiS_S_: # @_Z29__device_stub__gpu_matrix_mulPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14gpu_matrix_mulPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z29__device_stub__gpu_matrix_mulPiS_S_, .Lfunc_end0-_Z29__device_stub__gpu_matrix_mulPiS_S_ .cfi_endproc # -- End function .globl _Z14cpu_matrix_mulPA1024_iS0_S0_ # -- Begin function _Z14cpu_matrix_mulPA1024_iS0_S0_ .p2align 4, 0x90 .type _Z14cpu_matrix_mulPA1024_iS0_S0_,@function _Z14cpu_matrix_mulPA1024_iS0_S0_: # @_Z14cpu_matrix_mulPA1024_iS0_S0_ .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r12 xorl %r13d, %r13d callq _ZNSt6chrono3_V212system_clock3nowEv movq %rax, %r15 .p2align 4, 0x90 .LBB1_1: # %.preheader25 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 # Child Loop BB1_3 Depth 3 movq %r13, %rax 
shlq $12, %rax addq %rbx, %rax movq %r14, %rcx xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # %.preheader # Parent Loop BB1_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_3 Depth 3 leaq (%rax,%rdx,4), %rsi movl (%rax,%rdx,4), %edi movq %rcx, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB1_3: # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # => This Inner Loop Header: Depth=3 movl (%r8), %r10d imull (%r12,%r9,4), %r10d addl %r10d, %edi movl %edi, (%rsi) incq %r9 addq $4096, %r8 # imm = 0x1000 cmpq $1024, %r9 # imm = 0x400 jne .LBB1_3 # %bb.4: # in Loop: Header=BB1_2 Depth=2 incq %rdx addq $4, %rcx cmpq $1024, %rdx # imm = 0x400 jne .LBB1_2 # %bb.5: # in Loop: Header=BB1_1 Depth=1 incq %r13 addq $4096, %r12 # imm = 0x1000 cmpq $1024, %r13 # imm = 0x400 jne .LBB1_1 # %bb.6: callq _ZNSt6chrono3_V212system_clock3nowEv subq %r15, %rax movabsq $2361183241434822607, %rcx # imm = 0x20C49BA5E353F7CF imulq %rcx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB1_15 # %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i movq %rdx, %rbx movq %rdx, %rax shrq $63, %rax sarq $7, %rbx addq %rax, %rbx cmpb $0, 56(%r14) je .LBB1_9 # %bb.8: movzbl 67(%r14), %eax jmp .LBB1_10 .LBB1_9: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB1_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r14 movl $.L.str, %esi movl $16, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r14, %rdi movq %rbx, %rsi callq _ZNSo9_M_insertIlEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_15 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i18 cmpb $0, 56(%rbx) je .LBB1_13 # %bb.12: movzbl 67(%rbx), %ecx jmp .LBB1_14 
.LBB1_13: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit21 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp _ZNSo5flushEv # TAILCALL .LBB1_15: .cfi_def_cfa_offset 48 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size _Z14cpu_matrix_mulPA1024_iS0_S0_, .Lfunc_end1-_Z14cpu_matrix_mulPA1024_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $12583064, %rsp # imm = 0xC00098 .cfi_def_cfa_offset 12583120 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 8388752(%rsp), %rbx leaq 4194448(%rsp), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_1: # %.preheader97 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_2: # Parent Loop BB2_1 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $-542367883, %rax, %rcx # imm = 0xDFAC1F75 shrq $32, %rcx addl %eax, %ecx movl %ecx, %edx shrl $31, %edx sarl $8, %ecx addl %edx, %ecx imull $293, %ecx, %ecx # imm = 0x125 subl %ecx, %eax movl %eax, (%rbx,%r12,4) callq rand cltq imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx movl %ecx, %edx shll $6, %edx leal (%rdx,%rcx,2), %ecx subl %ecx, %eax movl %eax, (%r14,%r12,4) incq %r12 cmpq $1024, %r12 # imm = 0x400 
jne .LBB2_2 # %bb.3: # in Loop: Header=BB2_1 Depth=1 incq %r15 addq $4096, %rbx # imm = 0x1000 addq $4096, %r14 # imm = 0x1000 cmpq $1024, %r15 # imm = 0x400 jne .LBB2_1 # %bb.4: leaq 8388752(%rsp), %r13 leaq 4194448(%rsp), %r12 leaq 144(%rsp), %rdx movq %r13, %rdi movq %r12, %rsi callq _Z14cpu_matrix_mulPA1024_iS0_S0_ movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, %r15 movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, %r14 movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, (%rsp) # 8-byte Spill movl $4194304, %edx # imm = 0x400000 movq %r15, %rdi movq %r13, %rsi callq memcpy@PLT movl $4194304, %edx # imm = 0x400000 movq %r14, %rdi movq %r12, %rsi callq memcpy@PLT leaq 32(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 24(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 16(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc movq 32(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq %r15, %rsi movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy callq _ZNSt6chrono3_V212system_clock3nowEv movq %rax, %r14 movabsq $274877907008, %rdi # imm = 0x4000000040 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_6 # %bb.5: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z14gpu_matrix_mulPiS_S_, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_6: callq _ZNSt6chrono3_V212system_clock3nowEv subq %r14, %rax movabsq $2361183241434822607, %rcx # imm = 0x20C49BA5E353F7CF imulq %rcx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r15 testq %r15, %r15 je .LBB2_52 # %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i movq %rdx, %r14 movq %rdx, %rax shrq $63, %rax sarq $7, %r14 addq %rax, %r14 cmpb $0, 56(%r15) je .LBB2_9 # %bb.8: movzbl 67(%r15), %eax jmp .LBB2_10 .LBB2_9: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB2_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r15 movl $.L.str.1, %esi movl $11, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r15, %rdi movq %r14, %rsi callq _ZNSo9_M_insertIlEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i56 cmpb $0, 56(%r14) je .LBB2_13 # %bb.12: movzbl 67(%r14), %ecx jmp .LBB2_14 .LBB2_13: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit59 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq 16(%rsp), %rsi movl $4194304, %edx # imm = 0x400000 movq (%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movl (%rbx), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 144(%rsp), %esi movq %r14, %rdi callq 
_ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i61 cmpb $0, 56(%r14) je .LBB2_17 # %bb.16: movzbl 67(%r14), %ecx jmp .LBB2_18 .LBB2_17: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit64 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq (%rsp), %rax # 8-byte Reload movl 4(%rax), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 148(%rsp), %esi movq %r14, %rdi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i66 cmpb $0, 56(%r14) je .LBB2_21 # %bb.20: movzbl 67(%r14), %ecx jmp .LBB2_22 .LBB2_21: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit69 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq (%rsp), %r12 # 8-byte Reload addq $4, %r12 xorl %ebx, %ebx leaq 148(%rsp), %r13 xorl %ebp, %ebp xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_24: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_27 Depth 2 movq %rbx, %rax shlq $12, %rax movq (%rsp), %rcx # 8-byte Reload movl (%rcx,%rax), %ecx cmpl 144(%rsp,%rax), %ecx jne .LBB2_25 # %bb.26: # %.lr.ph.preheader # in Loop: Header=BB2_24 Depth=1 xorl %eax, %eax .p2align 4, 0x90 .LBB2_27: # %.lr.ph # Parent Loop BB2_24 Depth=1 # => This Inner Loop Header: Depth=2 cmpq $1023, 
%rax # imm = 0x3FF je .LBB2_39 # %bb.28: # in Loop: Header=BB2_27 Depth=2 movl (%r12,%rax,4), %edx leaq 1(%rax), %rcx cmpl (%r13,%rax,4), %edx movq %rcx, %rax je .LBB2_27 # %bb.29: # %._crit_edge.loopexit # in Loop: Header=BB2_24 Depth=1 decq %rcx cmpq $1023, %rcx # imm = 0x3FF setae %cl jmp .LBB2_30 .p2align 4, 0x90 .LBB2_25: # in Loop: Header=BB2_24 Depth=1 xorl %ecx, %ecx .LBB2_30: # %._crit_edge # in Loop: Header=BB2_24 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB2_52 # %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i71 # in Loop: Header=BB2_24 Depth=1 movl %ecx, 12(%rsp) # 4-byte Spill cmpb $0, 56(%r14) je .LBB2_33 # %bb.32: # in Loop: Header=BB2_24 Depth=1 movzbl 67(%r14), %eax jmp .LBB2_34 .p2align 4, 0x90 .LBB2_33: # in Loop: Header=BB2_24 Depth=1 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_34: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit74 # in Loop: Header=BB2_24 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r14 movl $.L.str.3, %esi movl $6, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r14), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_52 # %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i76 # in Loop: Header=BB2_24 Depth=1 cmpb $0, 56(%r15) je .LBB2_37 # %bb.36: # in Loop: Header=BB2_24 Depth=1 movzbl 67(%r15), %eax jmp .LBB2_38 .p2align 4, 0x90 .LBB2_37: # in Loop: Header=BB2_24 Depth=1 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB2_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit79 # in Loop: Header=BB2_24 Depth=1 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv 
movl $-1, %r15d movl 12(%rsp), %eax # 4-byte Reload testb %al, %al jne .LBB2_23 jmp .LBB2_41 .p2align 4, 0x90 .LBB2_39: # %.loopexit.loopexit # in Loop: Header=BB2_24 Depth=1 setae %al testb %al, %al je .LBB2_41 .LBB2_23: # in Loop: Header=BB2_24 Depth=1 cmpq $1023, %rbx # imm = 0x3FF leaq 1(%rbx), %rax setae %bpl addq $4096, %r12 # imm = 0x1000 addq $4096, %r13 # imm = 0x1000 movq %rax, %rbx cmpq $1024, %rax # imm = 0x400 jne .LBB2_24 .LBB2_41: testb $1, %bpl je .LBB2_51 # %bb.42: movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_52 # %bb.43: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i81 cmpb $0, 56(%rbx) je .LBB2_45 # %bb.44: movzbl 67(%rbx), %eax jmp .LBB2_46 .LBB2_45: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit84 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %rbx movl $.L.str.4, %esi movl $6, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_52 # %bb.47: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i86 cmpb $0, 56(%r14) je .LBB2_49 # %bb.48: movzbl 67(%r14), %eax jmp .LBB2_50 .LBB2_49: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_50: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit89 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB2_51: movl %r15d, %eax addq $12583064, %rsp # imm = 0xC00098 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp 
.cfi_def_cfa_offset 8 retq .LBB2_52: .cfi_def_cfa_offset 12583120 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14gpu_matrix_mulPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z14gpu_matrix_mulPiS_S_,@object # @_Z14gpu_matrix_mulPiS_S_ .section .rodata,"a",@progbits .globl _Z14gpu_matrix_mulPiS_S_ .p2align 3, 0x0 _Z14gpu_matrix_mulPiS_S_: .quad _Z29__device_stub__gpu_matrix_mulPiS_S_ .size _Z14gpu_matrix_mulPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " CPU exec time: " .size .L.str, 17 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " GPU time: " .size .L.str.1, 12 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " " .size .L.str.2, 2 .type .L.str.3,@object # @.str.3 
.L.str.3: .asciz "FAILED" .size .L.str.3, 7 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "PASSED" .size .L.str.4, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z14gpu_matrix_mulPiS_S_" .size .L__unnamed_1, 25 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__gpu_matrix_mulPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14gpu_matrix_mulPiS_S_ .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14gpu_matrix_mulPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002600 */ /*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e280000002200 */ /*0030*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R9, R9, c[0x0][0x4], R0 ; /* 0x0000010009097a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GT.AND P0, PT, R9, 0x3ff, PT ; /* 0x000003ff0900780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R2, c[0x0][0x0], R3 ; /* 0x0000000002007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3ff, P0 ; /* 0x000003ff0000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ SHF.L.U32 R9, R9, 0xa, RZ ; /* 0x0000000a09097819 */ /* 0x000fe200000006ff */ /*00b0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */ /* 0x000fe200000001ff */ /*00c0*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */ /* 0x000fe20000000f00 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00e0*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */ /* 0x000fe20000000f00 */ /*00f0*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */ /* 0x000fe20000000a00 */ /*0100*/ MOV R12, RZ ; /* 0x000000ff000c7202 */ /* 0x000fe40000000f00 */ /*0110*/ IADD3 R11, R9, 0x1, RZ ; /* 0x00000001090b7810 */ /* 0x000fe40007ffe0ff */ /*0120*/ MOV R6, UR6 ; /* 0x0000000600067c02 */ /* 0x000fe20008000f00 */ /*0130*/ IMAD.WIDE R14, R9, 0x4, R4 ; /* 0x00000004090e7825 */ /* 0x000fe200078e0204 */ /*0140*/ MOV R7, UR7 ; /* 0x0000000700077c02 */ /* 0x000fc60008000f00 */ /*0150*/ IMAD.WIDE R2, R11, 0x4, R4 ; /* 0x000000040b027825 */ /* 0x000fe400078e0204 */ 
/*0160*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x0000a4000c1e1900 */ /*0170*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */ /* 0x000fe400078e0206 */ /*0180*/ LDG.E R24, [R2.64] ; /* 0x0000000402187981 */ /* 0x000ee8000c1e1900 */ /*0190*/ LDG.E R17, [R6.64] ; /* 0x0000000406117981 */ /* 0x000ea8000c1e1900 */ /*01a0*/ LDG.E R27, [R6.64+0x1000] ; /* 0x00100004061b7981 */ /* 0x000ee8000c1e1900 */ /*01b0*/ LDG.E R16, [R2.64+0x4] ; /* 0x0000040402107981 */ /* 0x000f28000c1e1900 */ /*01c0*/ LDG.E R19, [R6.64+0x2000] ; /* 0x0020000406137981 */ /* 0x000f28000c1e1900 */ /*01d0*/ LDG.E R23, [R2.64+0x8] ; /* 0x0000080402177981 */ /* 0x000f68000c1e1900 */ /*01e0*/ LDG.E R20, [R6.64+0x3000] ; /* 0x0030000406147981 */ /* 0x000f68000c1e1900 */ /*01f0*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */ /* 0x000f68000c1e1900 */ /*0200*/ LDG.E R25, [R6.64+0x4000] ; /* 0x0040000406197981 */ /* 0x000f68000c1e1900 */ /*0210*/ LDG.E R18, [R2.64+0x10] ; /* 0x0000100402127981 */ /* 0x000f68000c1e1900 */ /*0220*/ LDG.E R21, [R6.64+0x5000] ; /* 0x0050000406157981 */ /* 0x000f68000c1e1900 */ /*0230*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */ /* 0x000f68000c1e1900 */ /*0240*/ LDG.E R13, [R6.64+0x6000] ; /* 0x00600004060d7981 */ /* 0x000f68000c1e1900 */ /*0250*/ LDG.E R15, [R6.64+0x7000] ; /* 0x00700004060f7981 */ /* 0x001f68000c1e1900 */ /*0260*/ LDG.E R26, [R6.64+0xf000] ; /* 0x00f00004061a7981 */ /* 0x000f68000c1e1900 */ /*0270*/ LDG.E R29, [R2.64+0x38] ; /* 0x00003804021d7981 */ /* 0x000f62000c1e1900 */ /*0280*/ IMAD R14, R17, R14, R12 ; /* 0x0000000e110e7224 */ /* 0x004fc600078e020c */ /*0290*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */ /* 0x000ea2000c1e1900 */ /*02a0*/ IMAD R24, R27, R24, R14 ; /* 0x000000181b187224 */ /* 0x008fc600078e020e */ /*02b0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */ /* 0x000ee8000c1e1900 */ /*02c0*/ LDG.E R17, [R6.64+0x8000] ; /* 0x0080000406117981 */ /* 0x000ee2000c1e1900 */ /*02d0*/ IMAD R24, R19, R16, R24 ; /* 
0x0000001013187224 */ /* 0x010fc600078e0218 */ /*02e0*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */ /* 0x000f28000c1e1900 */ /*02f0*/ LDG.E R19, [R6.64+0x9000] ; /* 0x0090000406137981 */ /* 0x000f22000c1e1900 */ /*0300*/ IMAD R24, R20, R23, R24 ; /* 0x0000001714187224 */ /* 0x020fc600078e0218 */ /*0310*/ LDG.E R23, [R2.64+0x24] ; /* 0x0000240402177981 */ /* 0x000f68000c1e1900 */ /*0320*/ LDG.E R20, [R6.64+0xa000] ; /* 0x00a0000406147981 */ /* 0x000f62000c1e1900 */ /*0330*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */ /* 0x000fc600078e0218 */ /*0340*/ LDG.E R25, [R2.64+0x28] ; /* 0x0000280402197981 */ /* 0x000f68000c1e1900 */ /*0350*/ LDG.E R22, [R6.64+0xb000] ; /* 0x00b0000406167981 */ /* 0x000f62000c1e1900 */ /*0360*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */ /* 0x000fc600078e0218 */ /*0370*/ LDG.E R21, [R2.64+0x2c] ; /* 0x00002c0402157981 */ /* 0x000f68000c1e1900 */ /*0380*/ LDG.E R18, [R6.64+0xc000] ; /* 0x00c0000406127981 */ /* 0x000f62000c1e1900 */ /*0390*/ IMAD R28, R13, R10, R24 ; /* 0x0000000a0d1c7224 */ /* 0x000fc600078e0218 */ /*03a0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */ /* 0x000f68000c1e1900 */ /*03b0*/ LDG.E R13, [R6.64+0xd000] ; /* 0x00d00004060d7981 */ /* 0x000f68000c1e1900 */ /*03c0*/ LDG.E R10, [R6.64+0xe000] ; /* 0x00e00004060a7981 */ /* 0x000f68000c1e1900 */ /*03d0*/ LDG.E R27, [R2.64+0x34] ; /* 0x00003404021b7981 */ /* 0x000f62000c1e1900 */ /*03e0*/ IADD3 R8, R8, 0x10, RZ ; /* 0x0000001008087810 */ /* 0x000fc80007ffe0ff */ /*03f0*/ ISETP.NE.AND P0, PT, R8, 0x400, PT ; /* 0x000004000800780c */ /* 0x000fe20003f05270 */ /*0400*/ UIADD3 UR6, UP0, UR6, 0x10000, URZ ; /* 0x0001000006067890 */ /* 0x000fe2000ff1e03f */ /*0410*/ IADD3 R4, P1, R4, 0x40, RZ ; /* 0x0000004004047810 */ /* 0x000fc60007f3e0ff */ /*0420*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0430*/ IADD3.X R5, RZ, R5, RZ, P1, !PT ; /* 0x00000005ff057210 */ /* 0x000fe20000ffe4ff */ /*0440*/ IMAD R12, R15, 
R12, R28 ; /* 0x0000000c0f0c7224 */ /* 0x004fc800078e021c */ /*0450*/ IMAD R12, R17, R14, R12 ; /* 0x0000000e110c7224 */ /* 0x008fc800078e020c */ /*0460*/ IMAD R12, R19, R16, R12 ; /* 0x00000010130c7224 */ /* 0x010fc800078e020c */ /*0470*/ IMAD R12, R20, R23, R12 ; /* 0x00000017140c7224 */ /* 0x020fc800078e020c */ /*0480*/ IMAD R12, R22, R25, R12 ; /* 0x00000019160c7224 */ /* 0x000fc800078e020c */ /*0490*/ IMAD R12, R18, R21, R12 ; /* 0x00000015120c7224 */ /* 0x000fc800078e020c */ /*04a0*/ IMAD R12, R13, R24, R12 ; /* 0x000000180d0c7224 */ /* 0x000fc800078e020c */ /*04b0*/ IMAD R12, R10, R27, R12 ; /* 0x0000001b0a0c7224 */ /* 0x000fc800078e020c */ /*04c0*/ IMAD R12, R26, R29, R12 ; /* 0x0000001d1a0c7224 */ /* 0x000fe200078e020c */ /*04d0*/ @P0 BRA 0x120 ; /* 0xfffffc4000000947 */ /* 0x000fea000383ffff */ /*04e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*04f0*/ IADD3 R2, R0, R9, RZ ; /* 0x0000000900027210 */ /* 0x000fd20007ffe0ff */ /*0500*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0203 */ /*0510*/ STG.E [R2.64], R12 ; /* 0x0000000c02007986 */ /* 0x000fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ 
/* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14gpu_matrix_mulPiS_S_ .globl _Z14gpu_matrix_mulPiS_S_ .p2align 8 .type _Z14gpu_matrix_mulPiS_S_,@function _Z14gpu_matrix_mulPiS_S_: s_load_b32 s2, s[0:1], 0x24 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v3, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4] s_mov_b32 s2, exec_lo v_max_i32_e32 v2, v0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0x400, v2 s_cbranch_execz .LBB0_4 s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 10, v0 v_mov_b32_e32 v4, 0 s_mov_b64 s[2:3], 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v7, vcc_lo, s4, v2 v_add_co_ci_u32_e32 v8, vcc_lo, s5, v3, vcc_lo v_mov_b32_e32 v2, v1 .p2align 6 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_ashrrev_i32_e32 v3, 31, v2 v_add_co_u32 v5, vcc_lo, v7, s2 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s3, v8, vcc_lo v_lshlrev_b64 v[9:10], 2, v[2:3] v_add_nc_u32_e32 v2, 0x400, v2 s_add_u32 s2, s2, 4 s_addc_u32 s3, s3, 0 s_cmpk_eq_i32 s2, 0x1000 s_delay_alu instid0(VALU_DEP_2) v_add_co_u32 v9, vcc_lo, s6, v9 v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo global_load_b32 v3, v[5:6], off global_load_b32 v9, v[9:10], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[5:6], null, v9, v3, v[4:5] s_delay_alu instid0(VALU_DEP_1) v_mov_b32_e32 v4, v5 s_cbranch_scc0 .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x10 v_lshl_add_u32 v0, v0, 10, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v5, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14gpu_matrix_mulPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14gpu_matrix_mulPiS_S_, .Lfunc_end0-_Z14gpu_matrix_mulPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14gpu_matrix_mulPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14gpu_matrix_mulPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0003d238_00000000-6_matrixMultiplication.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4137: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4137: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string " CPU exec time: " .text .globl _Z14cpu_matrix_mulPA1024_iS0_S0_ .type _Z14cpu_matrix_mulPA1024_iS0_S0_, @function _Z14cpu_matrix_mulPA1024_iS0_S0_: .LFB4131: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $8, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r13 call _ZNSt6chrono3_V212system_clock3nowEv@PLT movq %rax, %r12 movl $0, %r8d leaq 4198400(%rbp), %r9 jmp .L4 .L17: addq $4, %rsi addq $4, %rdi cmpq %rdi, %r9 je .L6 .L8: leaq (%rbx,%r8), %rcx leaq -4194304(%rdi), %rax .L5: movl (%rcx), %edx imull (%rax), %edx addl %edx, (%rsi) addq $4, %rcx addq $4096, %rax cmpq %rax, %rdi jne .L5 jmp .L17 .L6: addq $4096, %r8 cmpq $4194304, %r8 je .L7 .L4: leaq 0(%r13,%r8), %rsi leaq 4194304(%rbp), %rdi jmp .L8 .L7: call _ZNSt6chrono3_V212system_clock3nowEv@PLT subq %r12, %rax movq %rax, %rcx movabsq $2361183241434822607, %rdx imulq %rdx sarq $7, %rdx sarq $63, %rcx movq %rdx, %rbx subq %rcx, %rbx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbp testq %rbp, %rbp je .L18 cmpb $0, 56(%rbp) je .L10 movzbl 67(%rbp), %eax .L11: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq %rax, %rbp movl $16, %edx leaq 
.LC0(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbx, %rsi movq %rbp, %rdi call _ZNSo9_M_insertIlEERSoT_@PLT movq %rax, %rbp movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbx testq %rbx, %rbx je .L19 cmpb $0, 56(%rbx) je .L13 movzbl 67(%rbx), %eax .L14: movsbl %al, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state call _ZSt16__throw_bad_castv@PLT .L10: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) jmp .L11 .L19: call _ZSt16__throw_bad_castv@PLT .L13: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L14 .cfi_endproc .LFE4131: .size _Z14cpu_matrix_mulPA1024_iS0_S0_, .-_Z14cpu_matrix_mulPA1024_iS0_S0_ .globl _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ .type _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_, @function _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_: .LFB4159: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L24 .L20: movq 120(%rsp), %rax subq %fs:40, %rax jne .L25 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14gpu_matrix_mulPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L20 .L25: call __stack_chk_fail@PLT .cfi_endproc .LFE4159: .size _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_, .-_Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ .globl _Z14gpu_matrix_mulPiS_S_ .type _Z14gpu_matrix_mulPiS_S_, @function _Z14gpu_matrix_mulPiS_S_: .LFB4160: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4160: .size _Z14gpu_matrix_mulPiS_S_, .-_Z14gpu_matrix_mulPiS_S_ .section .rodata.str1.1 .LC1: .string " GPU time: " .LC2: .string " " .LC3: .string "FAILED" .LC4: .string "PASSED" .text .globl main .type main, @function main: .LFB4134: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 leaq -12582912(%rsp), %r11 .cfi_def_cfa 11, 12582944 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $64, %rsp .cfi_def_cfa_offset 12583008 movq %fs:40, %rax movq %rax, 12582968(%rsp) xorl %eax, %eax movl $4096, %ebp .L29: leaq -4096(%rbp), %rbx .L30: call rand@PLT movslq %eax, %rdx imulq $-542367883, %rdx, %rdx shrq $32, %rdx addl %eax, %edx sarl $8, %edx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $293, %edx, %edx subl %edx, %eax movl %eax, 48(%rsp,%rbx) call rand@PLT movslq %eax, %rdx imulq $1041204193, %rdx, %rdx sarq $36, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx imull $66, %edx, %edx subl %edx, %eax movl %eax, 4194352(%rsp,%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L30 addq $4096, %rbp cmpq $4198400, %rbp jne .L29 leaq 8388656(%rsp), %rdx leaq 4194352(%rsp), %rsi leaq 
48(%rsp), %rdi call _Z14cpu_matrix_mulPA1024_iS0_S0_ movl $4194304, %edi call malloc@PLT movq %rax, %rbp movl $4194304, %edi call malloc@PLT movq %rax, %rbx movl $4194304, %edi call malloc@PLT movq %rax, %r12 movl $4096, %ecx .L32: leaq -4096(%rcx), %rax .L33: movl 48(%rsp,%rax), %edx movl %edx, 0(%rbp,%rax) movl 4194352(%rsp,%rax), %edx movl %edx, (%rbx,%rax) addq $4, %rax cmpq %rcx, %rax jne .L33 addq $4096, %rcx cmpq $4198400, %rcx jne .L32 movq %rsp, %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 8(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT movl $1, %ecx movl $4194304, %edx movq %rbp, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $4194304, %edx movq %rbx, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $64, 24(%rsp) movl $64, 28(%rsp) movl $1, 32(%rsp) movl $16, 36(%rsp) movl $16, 40(%rsp) movl $1, 44(%rsp) call _ZNSt6chrono3_V212system_clock3nowEv@PLT movq %rax, %rbx movl 44(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movq 24(%rsp), %rdi movl 32(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L35: call _ZNSt6chrono3_V212system_clock3nowEv@PLT subq %rbx, %rax movq %rax, %rcx movabsq $2361183241434822607, %rdx imulq %rdx sarq $7, %rdx sarq $63, %rcx movq %rdx, %rbx subq %rcx, %rbx leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC1(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movq %rbx, %rsi call _ZNSo9_M_insertIlEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $2, %ecx movl $4194304, %edx movq 16(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl (%r12), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi leaq .LC2(%rip), %rbx movq %rbx, %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl 8388656(%rsp), 
%esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl 4(%r12), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi movq %rbx, %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl 8388660(%rsp), %esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %r12, %rdx leaq 8388656(%rsp), %rcx addq $4194304, %r12 .L36: movl $0, %eax .L39: movl (%rcx,%rax), %esi cmpl %esi, (%rdx,%rax) jne .L47 addq $4, %rax cmpq $4096, %rax jne .L39 addq $4096, %rdx addq $4096, %rcx cmpq %r12, %rdx jne .L36 leaq _ZSt4cout(%rip), %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC4(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $0, %eax jmp .L28 .L46: movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z38__device_stub__Z14gpu_matrix_mulPiS_S_PiS_S_ jmp .L35 .L47: leaq _ZSt4cout(%rip), %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rax, %rdi leaq .LC3(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $-1, %eax .L28: movq 12582968(%rsp), %rdx subq %fs:40, %rdx jne .L48 addq $12582976, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L48: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE4134: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z14gpu_matrix_mulPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4162: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, 
%rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z14gpu_matrix_mulPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4162: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "matrixMultiplication.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z29__device_stub__gpu_matrix_mulPiS_S_ # -- Begin function _Z29__device_stub__gpu_matrix_mulPiS_S_ .p2align 4, 0x90 .type _Z29__device_stub__gpu_matrix_mulPiS_S_,@function _Z29__device_stub__gpu_matrix_mulPiS_S_: # @_Z29__device_stub__gpu_matrix_mulPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14gpu_matrix_mulPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z29__device_stub__gpu_matrix_mulPiS_S_, .Lfunc_end0-_Z29__device_stub__gpu_matrix_mulPiS_S_ .cfi_endproc # -- End function .globl _Z14cpu_matrix_mulPA1024_iS0_S0_ # -- Begin function _Z14cpu_matrix_mulPA1024_iS0_S0_ .p2align 4, 0x90 .type _Z14cpu_matrix_mulPA1024_iS0_S0_,@function _Z14cpu_matrix_mulPA1024_iS0_S0_: # @_Z14cpu_matrix_mulPA1024_iS0_S0_ .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r12 xorl %r13d, %r13d callq _ZNSt6chrono3_V212system_clock3nowEv movq %rax, %r15 .p2align 4, 0x90 .LBB1_1: # %.preheader25 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 # Child Loop BB1_3 Depth 3 movq %r13, %rax 
shlq $12, %rax addq %rbx, %rax movq %r14, %rcx xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # %.preheader # Parent Loop BB1_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_3 Depth 3 leaq (%rax,%rdx,4), %rsi movl (%rax,%rdx,4), %edi movq %rcx, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB1_3: # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # => This Inner Loop Header: Depth=3 movl (%r8), %r10d imull (%r12,%r9,4), %r10d addl %r10d, %edi movl %edi, (%rsi) incq %r9 addq $4096, %r8 # imm = 0x1000 cmpq $1024, %r9 # imm = 0x400 jne .LBB1_3 # %bb.4: # in Loop: Header=BB1_2 Depth=2 incq %rdx addq $4, %rcx cmpq $1024, %rdx # imm = 0x400 jne .LBB1_2 # %bb.5: # in Loop: Header=BB1_1 Depth=1 incq %r13 addq $4096, %r12 # imm = 0x1000 cmpq $1024, %r13 # imm = 0x400 jne .LBB1_1 # %bb.6: callq _ZNSt6chrono3_V212system_clock3nowEv subq %r15, %rax movabsq $2361183241434822607, %rcx # imm = 0x20C49BA5E353F7CF imulq %rcx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB1_15 # %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i movq %rdx, %rbx movq %rdx, %rax shrq $63, %rax sarq $7, %rbx addq %rax, %rbx cmpb $0, 56(%r14) je .LBB1_9 # %bb.8: movzbl 67(%r14), %eax jmp .LBB1_10 .LBB1_9: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB1_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r14 movl $.L.str, %esi movl $16, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r14, %rdi movq %rbx, %rsi callq _ZNSo9_M_insertIlEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB1_15 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i18 cmpb $0, 56(%rbx) je .LBB1_13 # %bb.12: movzbl 67(%rbx), %ecx jmp .LBB1_14 
.LBB1_13: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB1_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit21 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp _ZNSo5flushEv # TAILCALL .LBB1_15: .cfi_def_cfa_offset 48 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size _Z14cpu_matrix_mulPA1024_iS0_S0_, .Lfunc_end1-_Z14cpu_matrix_mulPA1024_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $12583064, %rsp # imm = 0xC00098 .cfi_def_cfa_offset 12583120 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 8388752(%rsp), %rbx leaq 4194448(%rsp), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_1: # %.preheader97 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB2_2: # Parent Loop BB2_1 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $-542367883, %rax, %rcx # imm = 0xDFAC1F75 shrq $32, %rcx addl %eax, %ecx movl %ecx, %edx shrl $31, %edx sarl $8, %ecx addl %edx, %ecx imull $293, %ecx, %ecx # imm = 0x125 subl %ecx, %eax movl %eax, (%rbx,%r12,4) callq rand cltq imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx movl %ecx, %edx shll $6, %edx leal (%rdx,%rcx,2), %ecx subl %ecx, %eax movl %eax, (%r14,%r12,4) incq %r12 cmpq $1024, %r12 # imm = 0x400 
jne .LBB2_2 # %bb.3: # in Loop: Header=BB2_1 Depth=1 incq %r15 addq $4096, %rbx # imm = 0x1000 addq $4096, %r14 # imm = 0x1000 cmpq $1024, %r15 # imm = 0x400 jne .LBB2_1 # %bb.4: leaq 8388752(%rsp), %r13 leaq 4194448(%rsp), %r12 leaq 144(%rsp), %rdx movq %r13, %rdi movq %r12, %rsi callq _Z14cpu_matrix_mulPA1024_iS0_S0_ movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, %r15 movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, %r14 movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, (%rsp) # 8-byte Spill movl $4194304, %edx # imm = 0x400000 movq %r15, %rdi movq %r13, %rsi callq memcpy@PLT movl $4194304, %edx # imm = 0x400000 movq %r14, %rdi movq %r12, %rsi callq memcpy@PLT leaq 32(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 24(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 16(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc movq 32(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq %r15, %rsi movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy callq _ZNSt6chrono3_V212system_clock3nowEv movq %rax, %r14 movabsq $274877907008, %rdi # imm = 0x4000000040 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_6 # %bb.5: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z14gpu_matrix_mulPiS_S_, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq 
hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_6: callq _ZNSt6chrono3_V212system_clock3nowEv subq %r14, %rax movabsq $2361183241434822607, %rcx # imm = 0x20C49BA5E353F7CF imulq %rcx movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r15 testq %r15, %r15 je .LBB2_52 # %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i movq %rdx, %r14 movq %rdx, %rax shrq $63, %rax sarq $7, %r14 addq %rax, %r14 cmpb $0, 56(%r15) je .LBB2_9 # %bb.8: movzbl 67(%r15), %eax jmp .LBB2_10 .LBB2_9: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB2_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r15 movl $.L.str.1, %esi movl $11, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r15, %rdi movq %r14, %rsi callq _ZNSo9_M_insertIlEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i56 cmpb $0, 56(%r14) je .LBB2_13 # %bb.12: movzbl 67(%r14), %ecx jmp .LBB2_14 .LBB2_13: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit59 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq 16(%rsp), %rsi movl $4194304, %edx # imm = 0x400000 movq (%rsp), %rbx # 8-byte Reload movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movl (%rbx), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 144(%rsp), %esi movq %r14, %rdi callq 
_ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i61 cmpb $0, 56(%r14) je .LBB2_17 # %bb.16: movzbl 67(%r14), %ecx jmp .LBB2_18 .LBB2_17: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit64 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq (%rsp), %rax # 8-byte Reload movl 4(%rax), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 148(%rsp), %esi movq %r14, %rdi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r14 testq %r14, %r14 je .LBB2_52 # %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i66 cmpb $0, 56(%r14) je .LBB2_21 # %bb.20: movzbl 67(%r14), %ecx jmp .LBB2_22 .LBB2_21: movq %r14, %rdi movq %rax, %rbx callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %rbx, %rax .LBB2_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit69 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq (%rsp), %r12 # 8-byte Reload addq $4, %r12 xorl %ebx, %ebx leaq 148(%rsp), %r13 xorl %ebp, %ebp xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_24: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_27 Depth 2 movq %rbx, %rax shlq $12, %rax movq (%rsp), %rcx # 8-byte Reload movl (%rcx,%rax), %ecx cmpl 144(%rsp,%rax), %ecx jne .LBB2_25 # %bb.26: # %.lr.ph.preheader # in Loop: Header=BB2_24 Depth=1 xorl %eax, %eax .p2align 4, 0x90 .LBB2_27: # %.lr.ph # Parent Loop BB2_24 Depth=1 # => This Inner Loop Header: Depth=2 cmpq $1023, 
%rax # imm = 0x3FF je .LBB2_39 # %bb.28: # in Loop: Header=BB2_27 Depth=2 movl (%r12,%rax,4), %edx leaq 1(%rax), %rcx cmpl (%r13,%rax,4), %edx movq %rcx, %rax je .LBB2_27 # %bb.29: # %._crit_edge.loopexit # in Loop: Header=BB2_24 Depth=1 decq %rcx cmpq $1023, %rcx # imm = 0x3FF setae %cl jmp .LBB2_30 .p2align 4, 0x90 .LBB2_25: # in Loop: Header=BB2_24 Depth=1 xorl %ecx, %ecx .LBB2_30: # %._crit_edge # in Loop: Header=BB2_24 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB2_52 # %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i71 # in Loop: Header=BB2_24 Depth=1 movl %ecx, 12(%rsp) # 4-byte Spill cmpb $0, 56(%r14) je .LBB2_33 # %bb.32: # in Loop: Header=BB2_24 Depth=1 movzbl 67(%r14), %eax jmp .LBB2_34 .p2align 4, 0x90 .LBB2_33: # in Loop: Header=BB2_24 Depth=1 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_34: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit74 # in Loop: Header=BB2_24 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r14 movl $.L.str.3, %esi movl $6, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r14), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB2_52 # %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i76 # in Loop: Header=BB2_24 Depth=1 cmpb $0, 56(%r15) je .LBB2_37 # %bb.36: # in Loop: Header=BB2_24 Depth=1 movzbl 67(%r15), %eax jmp .LBB2_38 .p2align 4, 0x90 .LBB2_37: # in Loop: Header=BB2_24 Depth=1 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB2_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit79 # in Loop: Header=BB2_24 Depth=1 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv 
movl $-1, %r15d movl 12(%rsp), %eax # 4-byte Reload testb %al, %al jne .LBB2_23 jmp .LBB2_41 .p2align 4, 0x90 .LBB2_39: # %.loopexit.loopexit # in Loop: Header=BB2_24 Depth=1 setae %al testb %al, %al je .LBB2_41 .LBB2_23: # in Loop: Header=BB2_24 Depth=1 cmpq $1023, %rbx # imm = 0x3FF leaq 1(%rbx), %rax setae %bpl addq $4096, %r12 # imm = 0x1000 addq $4096, %r13 # imm = 0x1000 movq %rax, %rbx cmpq $1024, %rax # imm = 0x400 jne .LBB2_24 .LBB2_41: testb $1, %bpl je .LBB2_51 # %bb.42: movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_52 # %bb.43: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i81 cmpb $0, 56(%rbx) je .LBB2_45 # %bb.44: movzbl 67(%rbx), %eax jmp .LBB2_46 .LBB2_45: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB2_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit84 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %rbx movl $.L.str.4, %esi movl $6, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_52 # %bb.47: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i86 cmpb $0, 56(%r14) je .LBB2_49 # %bb.48: movzbl 67(%r14), %eax jmp .LBB2_50 .LBB2_49: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB2_50: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit89 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv .LBB2_51: movl %r15d, %eax addq $12583064, %rsp # imm = 0xC00098 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp 
.cfi_def_cfa_offset 8 retq .LBB2_52: .cfi_def_cfa_offset 12583120 callq _ZSt16__throw_bad_castv .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14gpu_matrix_mulPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z14gpu_matrix_mulPiS_S_,@object # @_Z14gpu_matrix_mulPiS_S_ .section .rodata,"a",@progbits .globl _Z14gpu_matrix_mulPiS_S_ .p2align 3, 0x0 _Z14gpu_matrix_mulPiS_S_: .quad _Z29__device_stub__gpu_matrix_mulPiS_S_ .size _Z14gpu_matrix_mulPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " CPU exec time: " .size .L.str, 17 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " GPU time: " .size .L.str.1, 12 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz " " .size .L.str.2, 2 .type .L.str.3,@object # @.str.3 
.L.str.3: .asciz "FAILED" .size .L.str.3, 7 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "PASSED" .size .L.str.4, 7 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z14gpu_matrix_mulPiS_S_" .size .L__unnamed_1, 25 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__gpu_matrix_mulPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14gpu_matrix_mulPiS_S_ .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
extern "C" { __global__ void fill_u8(unsigned char *y, unsigned char elem, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = elem; } } __global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = elem; } } __global__ void u8_to_f32(const unsigned char* x, float* y, unsigned int len) { const float scale = 1.0f / 255.0f; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = scale * x[tid]; } } __global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid*nclasses+x[tid]] = 1.0f; } } __global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = x[tid % c]; } } __global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { atomicAdd(&dx[tid % c], dy[tid]); } } __global__ void add(const float* x1, const float* x2, float* y, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = x1[tid] + x2[tid]; } } __global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < batch_size) { // compute max value of slice float m = x[tid*nclasses]; for(int i = 1; i < nclasses; ++i) { m = fmaxf(x[tid*nclasses+i], m); } // subtract max for(int i = 0; i < nclasses; ++i) { y[tid*nclasses+i] = x[tid*nclasses+i]-m; } // sum float s = 0.0f; for(int i = 0; i < nclasses; ++i) { s += expf(y[tid*nclasses+i]); } // compute ln(s) float ln_s = logf(s); // y = (ln_s - y) * t for(int i = 0; i < nclasses; ++i) { y[tid*nclasses+i] = (ln_s - y[tid*nclasses+i]) * 
t[tid*nclasses+i]; } } } __global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { dx[tid] = dy[0] * (x[tid] - t[tid]); } } __global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) { // from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html // Load a segment of the input vector into shared memory __shared__ float partialSum[2*256]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2*blockIdx.x*blockDim.x; if ((start + t) < len) { partialSum[t] = input[start + t]; } else { partialSum[t] = 0.0; } if ((start + blockDim.x + t) < len) { partialSum[blockDim.x + t] = input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); // Write the computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId*2) < len) { output[blockIdx.x] = partialSum[t]; } } __global__ void reduce_sum_final(const float* x, float* y, unsigned int len) { *y = 0; for(int i = 0; i < len; ++i) { *y += x[i]; } } __global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid < len) { if (beta == 0.0f) { for(int i = 0; i < filter_len; ++i) { y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)]; } } else { for(int i = 0; i < filter_len; ++i) { y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)] + beta * y[tid*filter_len + i]; } } } } __global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid < 
len) { v[tid] = momentum * v[tid] + dw[tid]; w[tid] -= learning_rate * v[tid]; } } }
.file "tmpxft_0000d7bd_00000000-6_kernels.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z28__device_stub__Z7fill_u8PhhjPhhj .type _Z28__device_stub__Z7fill_u8PhhjPhhj, @function _Z28__device_stub__Z7fill_u8PhhjPhhj: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %edx, (%rsp) movb %sil, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq fill_u8(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z28__device_stub__Z7fill_u8PhhjPhhj, .-_Z28__device_stub__Z7fill_u8PhhjPhhj .globl fill_u8 .type fill_u8, @function fill_u8: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movzbl %sil, %esi call _Z28__device_stub__Z7fill_u8PhhjPhhj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size fill_u8, .-fill_u8 .globl _Z29__device_stub__Z8fill_u32PjjjPjjj .type 
_Z29__device_stub__Z8fill_u32PjjjPjjj, @function _Z29__device_stub__Z8fill_u32PjjjPjjj: .LFB2053: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 104(%rsp), %rax subq %fs:40, %rax jne .L16 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq fill_u32(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z29__device_stub__Z8fill_u32PjjjPjjj, .-_Z29__device_stub__Z8fill_u32PjjjPjjj .globl fill_u32 .type fill_u32, @function fill_u32: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z8fill_u32PjjjPjjj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size fill_u32, .-fill_u32 .globl _Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj .type _Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj, @function _Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj: .LFB2055: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 
64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq u8_to_f32(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj, .-_Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj .globl u8_to_f32 .type u8_to_f32, @function u8_to_f32: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z9u8_to_f32PKhPfjPKhPfj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size u8_to_f32, .-u8_to_f32 .globl _Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj .type _Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj, @function _Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj: .LFB2057: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movl %ecx, 16(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 16(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 136(%rsp), %rax subq %fs:40, %rax jne .L32 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 
40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq u8_to_one_hot_f32(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj, .-_Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj .globl u8_to_one_hot_f32 .type u8_to_one_hot_f32, @function u8_to_one_hot_f32: .LFB2058: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z17u8_to_one_hot_f32PKhjPfjPKhjPfj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size u8_to_one_hot_f32, .-u8_to_one_hot_f32 .globl _Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj .type _Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj, @function _Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj: .LFB2059: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L39 .L35: movq 136(%rsp), %rax subq %fs:40, %rax jne .L40 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L39: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq broadcast(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L35 .L40: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size 
_Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj, .-_Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj .globl broadcast .type broadcast, @function broadcast: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z9broadcastPKfPfjjPKfPfjj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size broadcast, .-broadcast .globl _Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj .type _Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj, @function _Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj: .LFB2061: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L47 .L43: movq 136(%rsp), %rax subq %fs:40, %rax jne .L48 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq broadcast_backward(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L43 .L48: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj, .-_Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj .globl broadcast_backward .type broadcast_backward, @function broadcast_backward: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z18broadcast_backwardPfPKfjjPfPKfjj addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size broadcast_backward, .-broadcast_backward .globl _Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj .type _Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj, @function _Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj: .LFB2063: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movq 136(%rsp), %rax subq %fs:40, %rax jne .L56 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L55: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq add(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L51 .L56: call __stack_chk_fail@PLT .cfi_endproc .LFE2063: .size _Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj, .-_Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj .globl add .type add, @function add: .LFB2064: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z3addPKfS0_PfjPKfS0_Pfj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2064: .size add, .-add .globl _Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf .type _Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf, @function _Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf: .LFB2065: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movl %esi, 24(%rsp) movq 
%rdx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L63 .L59: movq 136(%rsp), %rax subq %fs:40, %rax jne .L64 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L63: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq cross_entropy_forward(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L59 .L64: call __stack_chk_fail@PLT .cfi_endproc .LFE2065: .size _Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf, .-_Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf .globl cross_entropy_forward .type cross_entropy_forward, @function cross_entropy_forward: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z21cross_entropy_forwardjjPKfS0_PfjjPKfS0_Pf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size cross_entropy_forward, .-cross_entropy_forward .globl _Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j .type _Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j, @function _Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j: .LFB2067: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), 
%rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L71 .L67: movq 152(%rsp), %rax subq %fs:40, %rax jne .L72 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L71: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq cross_entropy_backward(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L67 .L72: call __stack_chk_fail@PLT .cfi_endproc .LFE2067: .size _Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j, .-_Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j .globl cross_entropy_backward .type cross_entropy_backward, @function cross_entropy_backward: .LFB2068: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z52__device_stub__Z22cross_entropy_backwardPKfPfS0_S1_jPKfPfS0_S1_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2068: .size cross_entropy_backward, .-cross_entropy_backward .globl _Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj .type _Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj, @function _Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj: .LFB2069: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) 
movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L79 .L75: movq 120(%rsp), %rax subq %fs:40, %rax jne .L80 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L79: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq reduce_sum_partial(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L75 .L80: call __stack_chk_fail@PLT .cfi_endproc .LFE2069: .size _Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj, .-_Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj .globl reduce_sum_partial .type reduce_sum_partial, @function reduce_sum_partial: .LFB2070: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18reduce_sum_partialPKfPfjPKfPfj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2070: .size reduce_sum_partial, .-reduce_sum_partial .globl _Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj .type _Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj, @function _Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj: .LFB2071: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L87 .L83: movq 120(%rsp), %rax subq %fs:40, %rax jne .L88 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L87: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq reduce_sum_final(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L83 .L88: call __stack_chk_fail@PLT .cfi_endproc .LFE2071: .size _Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj, .-_Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj .globl reduce_sum_final .type reduce_sum_final, @function reduce_sum_final: .LFB2072: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z16reduce_sum_finalPKfPfjPKfPfj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2072: .size reduce_sum_final, .-reduce_sum_final .globl _Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj .type _Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj, @function _Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj: .LFB2073: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movss %xmm0, 20(%rsp) movq %rsi, 8(%rsp) movl %edx, 16(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 16(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L95 .L91: movq 136(%rsp), %rax subq %fs:40, %rax jne .L96 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L95: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq reverse_conv_filter(%rip), %rdi call 
cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L91 .L96: call __stack_chk_fail@PLT .cfi_endproc .LFE2073: .size _Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj, .-_Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj .globl reverse_conv_filter .type reverse_conv_filter, @function reverse_conv_filter: .LFB2074: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z19reverse_conv_filterPKffPfjjPKffPfjj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2074: .size reverse_conv_filter, .-reverse_conv_filter .globl _Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j .type _Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j, @function _Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j: .LFB2075: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movss %xmm0, 28(%rsp) movss %xmm1, 24(%rsp) movq %rdx, 16(%rsp) movl %ecx, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L103 .L99: movq 168(%rsp), %rax subq %fs:40, %rax jne .L104 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L103: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq sgd_with_momentum(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L99 .L104: call __stack_chk_fail@PLT 
.cfi_endproc .LFE2075: .size _Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j, .-_Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j .globl sgd_with_momentum .type sgd_with_momentum, @function sgd_with_momentum: .LFB2076: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z17sgd_with_momentumPfPKfffS_jPfPKfffS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2076: .size sgd_with_momentum, .-sgd_with_momentum .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "sgd_with_momentum" .LC1: .string "reverse_conv_filter" .LC2: .string "reduce_sum_final" .LC3: .string "reduce_sum_partial" .LC4: .string "cross_entropy_backward" .LC5: .string "cross_entropy_forward" .LC6: .string "add" .LC7: .string "broadcast_backward" .LC8: .string "broadcast" .LC9: .string "u8_to_one_hot_f32" .LC10: .string "u8_to_f32" .LC11: .string "fill_u32" .LC12: .string "fill_u8" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2078: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq sgd_with_momentum(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq reverse_conv_filter(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, 
%r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq reduce_sum_final(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq reduce_sum_partial(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq cross_entropy_backward(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq cross_entropy_forward(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq add(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq broadcast_backward(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq broadcast(%rip), %rsi movq %rbx, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq u8_to_one_hot_f32(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq u8_to_f32(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq fill_u32(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC12(%rip), %rdx movq %rdx, %rcx leaq fill_u8(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2078: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
extern "C" {

/* Fill a byte buffer with a constant value. One thread per element;
 * threads past len do nothing. */
__global__ void fill_u8(unsigned char *y, unsigned char elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Fill a u32 buffer with a constant value. One thread per element. */
__global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Convert u8 values to f32 scaled by 1/255 (maps 0..255 into [0, 1]). */
__global__ void u8_to_f32(const unsigned char* x, float* y, unsigned int len) {
    const float scale = 1.0f / 255.0f;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = scale * x[tid];
    }
}

/*
 * Scatter class labels into one-hot rows: y[i*nclasses + x[i]] = 1.
 * NOTE(review): only the hot entry is written, so y is presumably
 * zero-filled by the caller beforehand — confirm at call sites.
 */
__global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid*nclasses + x[tid]] = 1.0f;
    }
}

/* Tile the c-element vector x cyclically across the len-element output y. */
__global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x[tid % c];
    }
}

/*
 * Gradient of broadcast: accumulate dy back into the c-element dx.
 * atomicAdd is required because len/c threads target each dx slot.
 */
__global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        atomicAdd(&dx[tid % c], dy[tid]);
    }
}

/* Elementwise y = x1 + x2. */
__global__ void add(const float* x1, const float* x2, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x1[tid] + x2[tid];
    }
}

/*
 * Softmax cross-entropy forward, one thread per batch row of nclasses
 * entries. Writes, per element: y = (log(sum_j exp(x_j - m)) - (x - m)) * t
 * where m is the row maximum (subtracted for numerical stability).
 * y is used as scratch for the shifted row between the loops.
 */
__global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
        // row maximum, for numerical stability of expf below
        float m = x[tid*nclasses];
        for (unsigned int i = 1; i < nclasses; ++i) {
            m = fmaxf(x[tid*nclasses+i], m);
        }
        // shift the row by the maximum
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = x[tid*nclasses+i] - m;
        }
        // sum of exponentials of the shifted row
        float s = 0.0f;
        for (unsigned int i = 0; i < nclasses; ++i) {
            s += expf(y[tid*nclasses+i]);
        }
        float ln_s = logf(s);
        // negative log-softmax weighted by the target distribution
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = (ln_s - y[tid*nclasses+i]) * t[tid*nclasses+i];
        }
    }
}

/*
 * Cross-entropy backward: dx = dy[0] * (x - t), elementwise.
 * dy[0] is a single upstream scalar gradient read by every thread.
 */
__global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        dx[tid] = dy[0] * (x[tid] - t[tid]);
    }
}

/*
 * Block-wise tree reduction: each block sums 2*blockDim.x consecutive
 * inputs and writes one partial sum to output[blockIdx.x].
 * Adapted from
 * http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
 * PRECONDITION: blockDim.x <= 256 — the shared buffer holds 2*256 floats.
 */
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
    __shared__ float partialSum[2*256];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // Stage two elements per thread, zero-padding past the end of input.
    if ((start + t) < len) {
        partialSum[t] = input[start + t];
    } else {
        partialSum[t] = 0.0f;
    }
    if ((start + blockDim.x + t) < len) {
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    } else {
        partialSum[blockDim.x + t] = 0.0f;
    }
    // Halve the active range each step; the barrier sits before the add
    // because each step reads sums written by other threads previously.
    for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    __syncthreads();
    // Thread 0 publishes the block result (skip blocks entirely past len).
    if (t == 0 && (globalThreadId*2) < len) {
        output[blockIdx.x] = partialSum[t];
    }
}

/*
 * Serial final reduction: *y = sum(x[0..len)).
 * Must be launched with a single thread: every launched thread performs
 * the full sum and read-modify-writes *y, so more threads would race.
 */
__global__ void reduce_sum_final(const float* x, float* y, unsigned int len) {
    *y = 0;
    for (unsigned int i = 0; i < len; ++i) {
        *y += x[i];
    }
}

/*
 * Reverse each filter_len-long filter of x into y, one thread per filter:
 *   beta == 0:  y[i] = x[reversed i]              (overwrite)
 *   otherwise:  y[i] = x[reversed i] + beta*y[i]  (blend with existing y)
 */
__global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        if (beta == 0.0f) {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)];
            }
        } else {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)] + beta * y[tid*filter_len + i];
            }
        }
    }
}

/*
 * SGD with classical momentum, elementwise:
 *   v = momentum * v + dw
 *   w = w - learning_rate * v
 */
__global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        v[tid] = momentum * v[tid] + dw[tid];
        w[tid] -= learning_rate * v[tid];
    }
}

}
#include <hip/hip_runtime.h>

extern "C" {

/* Fill a byte buffer with a constant value. One thread per element;
 * threads past len do nothing. */
__global__ void fill_u8(unsigned char *y, unsigned char elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Fill a u32 buffer with a constant value. One thread per element. */
__global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Convert u8 values to f32 scaled by 1/255 (maps 0..255 into [0, 1]). */
__global__ void u8_to_f32(const unsigned char* x, float* y, unsigned int len) {
    const float scale = 1.0f / 255.0f;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = scale * x[tid];
    }
}

/*
 * Scatter class labels into one-hot rows: y[i*nclasses + x[i]] = 1.
 * NOTE(review): only the hot entry is written, so y is presumably
 * zero-filled by the caller beforehand — confirm at call sites.
 */
__global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid*nclasses + x[tid]] = 1.0f;
    }
}

/* Tile the c-element vector x cyclically across the len-element output y. */
__global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x[tid % c];
    }
}

/*
 * Gradient of broadcast: accumulate dy back into the c-element dx.
 * atomicAdd is required because len/c threads target each dx slot.
 */
__global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        atomicAdd(&dx[tid % c], dy[tid]);
    }
}

/* Elementwise y = x1 + x2. */
__global__ void add(const float* x1, const float* x2, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x1[tid] + x2[tid];
    }
}

/*
 * Softmax cross-entropy forward, one thread per batch row of nclasses
 * entries. Writes, per element: y = (log(sum_j exp(x_j - m)) - (x - m)) * t
 * where m is the row maximum (subtracted for numerical stability).
 * y is used as scratch for the shifted row between the loops.
 */
__global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
        // row maximum, for numerical stability of expf below
        float m = x[tid*nclasses];
        for (unsigned int i = 1; i < nclasses; ++i) {
            m = fmaxf(x[tid*nclasses+i], m);
        }
        // shift the row by the maximum
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = x[tid*nclasses+i] - m;
        }
        // sum of exponentials of the shifted row
        float s = 0.0f;
        for (unsigned int i = 0; i < nclasses; ++i) {
            s += expf(y[tid*nclasses+i]);
        }
        float ln_s = logf(s);
        // negative log-softmax weighted by the target distribution
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = (ln_s - y[tid*nclasses+i]) * t[tid*nclasses+i];
        }
    }
}

/*
 * Cross-entropy backward: dx = dy[0] * (x - t), elementwise.
 * dy[0] is a single upstream scalar gradient read by every thread.
 */
__global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        dx[tid] = dy[0] * (x[tid] - t[tid]);
    }
}

/*
 * Block-wise tree reduction: each block sums 2*blockDim.x consecutive
 * inputs and writes one partial sum to output[blockIdx.x].
 * Adapted from
 * http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
 * PRECONDITION: blockDim.x <= 256 — the shared buffer holds 2*256 floats.
 */
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
    __shared__ float partialSum[2*256];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // Stage two elements per thread, zero-padding past the end of input.
    if ((start + t) < len) {
        partialSum[t] = input[start + t];
    } else {
        partialSum[t] = 0.0f;
    }
    if ((start + blockDim.x + t) < len) {
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    } else {
        partialSum[blockDim.x + t] = 0.0f;
    }
    // Halve the active range each step; the barrier sits before the add
    // because each step reads sums written by other threads previously.
    for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    __syncthreads();
    // Thread 0 publishes the block result (skip blocks entirely past len).
    if (t == 0 && (globalThreadId*2) < len) {
        output[blockIdx.x] = partialSum[t];
    }
}

/*
 * Serial final reduction: *y = sum(x[0..len)).
 * Must be launched with a single thread: every launched thread performs
 * the full sum and read-modify-writes *y, so more threads would race.
 */
__global__ void reduce_sum_final(const float* x, float* y, unsigned int len) {
    *y = 0;
    for (unsigned int i = 0; i < len; ++i) {
        *y += x[i];
    }
}

/*
 * Reverse each filter_len-long filter of x into y, one thread per filter:
 *   beta == 0:  y[i] = x[reversed i]              (overwrite)
 *   otherwise:  y[i] = x[reversed i] + beta*y[i]  (blend with existing y)
 */
__global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        if (beta == 0.0f) {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)];
            }
        } else {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)] + beta * y[tid*filter_len + i];
            }
        }
    }
}

/*
 * SGD with classical momentum, elementwise:
 *   v = momentum * v + dw
 *   w = w - learning_rate * v
 */
__global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        v[tid] = momentum * v[tid] + dw[tid];
        w[tid] -= learning_rate * v[tid];
    }
}

}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>

extern "C" {

/* Fill a byte buffer with a constant value. One thread per element;
 * threads past len do nothing. */
__global__ void fill_u8(unsigned char *y, unsigned char elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Fill a u32 buffer with a constant value. One thread per element. */
__global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = elem;
    }
}

/* Convert u8 values to f32 scaled by 1/255 (maps 0..255 into [0, 1]). */
__global__ void u8_to_f32(const unsigned char* x, float* y, unsigned int len) {
    const float scale = 1.0f / 255.0f;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = scale * x[tid];
    }
}

/*
 * Scatter class labels into one-hot rows: y[i*nclasses + x[i]] = 1.
 * NOTE(review): only the hot entry is written, so y is presumably
 * zero-filled by the caller beforehand — confirm at call sites.
 */
__global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid*nclasses + x[tid]] = 1.0f;
    }
}

/* Tile the c-element vector x cyclically across the len-element output y. */
__global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x[tid % c];
    }
}

/*
 * Gradient of broadcast: accumulate dy back into the c-element dx.
 * atomicAdd is required because len/c threads target each dx slot.
 */
__global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        atomicAdd(&dx[tid % c], dy[tid]);
    }
}

/* Elementwise y = x1 + x2. */
__global__ void add(const float* x1, const float* x2, float* y, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        y[tid] = x1[tid] + x2[tid];
    }
}

/*
 * Softmax cross-entropy forward, one thread per batch row of nclasses
 * entries. Writes, per element: y = (log(sum_j exp(x_j - m)) - (x - m)) * t
 * where m is the row maximum (subtracted for numerical stability).
 * y is used as scratch for the shifted row between the loops.
 */
__global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
        // row maximum, for numerical stability of expf below
        float m = x[tid*nclasses];
        for (unsigned int i = 1; i < nclasses; ++i) {
            m = fmaxf(x[tid*nclasses+i], m);
        }
        // shift the row by the maximum
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = x[tid*nclasses+i] - m;
        }
        // sum of exponentials of the shifted row
        float s = 0.0f;
        for (unsigned int i = 0; i < nclasses; ++i) {
            s += expf(y[tid*nclasses+i]);
        }
        float ln_s = logf(s);
        // negative log-softmax weighted by the target distribution
        for (unsigned int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = (ln_s - y[tid*nclasses+i]) * t[tid*nclasses+i];
        }
    }
}

/*
 * Cross-entropy backward: dx = dy[0] * (x - t), elementwise.
 * dy[0] is a single upstream scalar gradient read by every thread.
 */
__global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        dx[tid] = dy[0] * (x[tid] - t[tid]);
    }
}

/*
 * Block-wise tree reduction: each block sums 2*blockDim.x consecutive
 * inputs and writes one partial sum to output[blockIdx.x].
 * Adapted from
 * http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
 * PRECONDITION: blockDim.x <= 256 — the shared buffer holds 2*256 floats.
 */
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
    __shared__ float partialSum[2*256];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // Stage two elements per thread, zero-padding past the end of input.
    if ((start + t) < len) {
        partialSum[t] = input[start + t];
    } else {
        partialSum[t] = 0.0f;
    }
    if ((start + blockDim.x + t) < len) {
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    } else {
        partialSum[blockDim.x + t] = 0.0f;
    }
    // Halve the active range each step; the barrier sits before the add
    // because each step reads sums written by other threads previously.
    for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    __syncthreads();
    // Thread 0 publishes the block result (skip blocks entirely past len).
    if (t == 0 && (globalThreadId*2) < len) {
        output[blockIdx.x] = partialSum[t];
    }
}

/*
 * Serial final reduction: *y = sum(x[0..len)).
 * Must be launched with a single thread: every launched thread performs
 * the full sum and read-modify-writes *y, so more threads would race.
 */
__global__ void reduce_sum_final(const float* x, float* y, unsigned int len) {
    *y = 0;
    for (unsigned int i = 0; i < len; ++i) {
        *y += x[i];
    }
}

/*
 * Reverse each filter_len-long filter of x into y, one thread per filter:
 *   beta == 0:  y[i] = x[reversed i]              (overwrite)
 *   otherwise:  y[i] = x[reversed i] + beta*y[i]  (blend with existing y)
 */
__global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        if (beta == 0.0f) {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)];
            }
        } else {
            for (unsigned int i = 0; i < filter_len; ++i) {
                y[tid*filter_len + i] = x[tid*filter_len + ((filter_len-1) - i)] + beta * y[tid*filter_len + i];
            }
        }
    }
}

/*
 * SGD with classical momentum, elementwise:
 *   v = momentum * v + dw
 *   w = w - learning_rate * v
 */
__global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < len) {
        v[tid] = momentum * v[tid] + dw[tid];
        w[tid] -= learning_rate * v[tid];
    }
}

}
.text .file "kernels.hip" .globl __device_stub__fill_u8 # -- Begin function __device_stub__fill_u8 .p2align 4, 0x90 .type __device_stub__fill_u8,@function __device_stub__fill_u8: # @__device_stub__fill_u8 .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movb %sil, 3(%rsp) movl %edx, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 3(%rsp), %rax movq %rax, 72(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $fill_u8, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size __device_stub__fill_u8, .Lfunc_end0-__device_stub__fill_u8 .cfi_endproc # -- End function .globl __device_stub__fill_u32 # -- Begin function __device_stub__fill_u32 .p2align 4, 0x90 .type __device_stub__fill_u32,@function __device_stub__fill_u32: # @__device_stub__fill_u32 .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $fill_u32, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size __device_stub__fill_u32, .Lfunc_end1-__device_stub__fill_u32 .cfi_endproc # -- End function .globl __device_stub__u8_to_f32 # -- Begin function __device_stub__u8_to_f32 .p2align 4, 0x90 .type __device_stub__u8_to_f32,@function 
__device_stub__u8_to_f32: # @__device_stub__u8_to_f32 .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $u8_to_f32, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size __device_stub__u8_to_f32, .Lfunc_end2-__device_stub__u8_to_f32 .cfi_endproc # -- End function .globl __device_stub__u8_to_one_hot_f32 # -- Begin function __device_stub__u8_to_one_hot_f32 .p2align 4, 0x90 .type __device_stub__u8_to_one_hot_f32,@function __device_stub__u8_to_one_hot_f32: # @__device_stub__u8_to_one_hot_f32 .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $u8_to_one_hot_f32, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end3: .size __device_stub__u8_to_one_hot_f32, .Lfunc_end3-__device_stub__u8_to_one_hot_f32 .cfi_endproc # -- End function .globl __device_stub__broadcast # -- Begin function __device_stub__broadcast .p2align 4, 0x90 .type __device_stub__broadcast,@function 
__device_stub__broadcast: # @__device_stub__broadcast .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $broadcast, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size __device_stub__broadcast, .Lfunc_end4-__device_stub__broadcast .cfi_endproc # -- End function .globl __device_stub__broadcast_backward # -- Begin function __device_stub__broadcast_backward .p2align 4, 0x90 .type __device_stub__broadcast_backward,@function __device_stub__broadcast_backward: # @__device_stub__broadcast_backward .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $broadcast_backward, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size __device_stub__broadcast_backward, .Lfunc_end5-__device_stub__broadcast_backward .cfi_endproc # -- End function .globl __device_stub__add # -- Begin function __device_stub__add .p2align 4, 0x90 
.type __device_stub__add,@function __device_stub__add: # @__device_stub__add .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $add, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end6: .size __device_stub__add, .Lfunc_end6-__device_stub__add .cfi_endproc # -- End function .globl __device_stub__cross_entropy_forward # -- Begin function __device_stub__cross_entropy_forward .p2align 4, 0x90 .type __device_stub__cross_entropy_forward,@function __device_stub__cross_entropy_forward: # @__device_stub__cross_entropy_forward .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movl %esi, (%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) leaq 72(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $cross_entropy_forward, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end7: .size __device_stub__cross_entropy_forward, .Lfunc_end7-__device_stub__cross_entropy_forward .cfi_endproc # -- End function 
.globl __device_stub__cross_entropy_backward # -- Begin function __device_stub__cross_entropy_backward .p2align 4, 0x90 .type __device_stub__cross_entropy_backward,@function __device_stub__cross_entropy_backward: # @__device_stub__cross_entropy_backward .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $cross_entropy_backward, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end8: .size __device_stub__cross_entropy_backward, .Lfunc_end8-__device_stub__cross_entropy_backward .cfi_endproc # -- End function .globl __device_stub__reduce_sum_partial # -- Begin function __device_stub__reduce_sum_partial .p2align 4, 0x90 .type __device_stub__reduce_sum_partial,@function __device_stub__reduce_sum_partial: # @__device_stub__reduce_sum_partial .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_sum_partial, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp 
.cfi_adjust_cfa_offset -120 retq .Lfunc_end9: .size __device_stub__reduce_sum_partial, .Lfunc_end9-__device_stub__reduce_sum_partial .cfi_endproc # -- End function .globl __device_stub__reduce_sum_final # -- Begin function __device_stub__reduce_sum_final .p2align 4, 0x90 .type __device_stub__reduce_sum_final,@function __device_stub__reduce_sum_final: # @__device_stub__reduce_sum_final .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $reduce_sum_final, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end10: .size __device_stub__reduce_sum_final, .Lfunc_end10-__device_stub__reduce_sum_final .cfi_endproc # -- End function .globl __device_stub__reverse_conv_filter # -- Begin function __device_stub__reverse_conv_filter .p2align 4, 0x90 .type __device_stub__reverse_conv_filter,@function __device_stub__reverse_conv_filter: # @__device_stub__reverse_conv_filter .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movss %xmm0, 12(%rsp) movq %rsi, 64(%rsp) movl %edx, 8(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $reverse_conv_filter, %edi 
pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end11: .size __device_stub__reverse_conv_filter, .Lfunc_end11-__device_stub__reverse_conv_filter .cfi_endproc # -- End function .globl __device_stub__sgd_with_momentum # -- Begin function __device_stub__sgd_with_momentum .p2align 4, 0x90 .type __device_stub__sgd_with_momentum,@function __device_stub__sgd_with_momentum: # @__device_stub__sgd_with_momentum .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movss %xmm0, 20(%rsp) movss %xmm1, 16(%rsp) movq %rdx, 72(%rsp) movl %ecx, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 20(%rsp), %rax movq %rax, 112(%rsp) leaq 16(%rsp), %rax movq %rax, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $sgd_with_momentum, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end12: .size __device_stub__sgd_with_momentum, .Lfunc_end12-__device_stub__sgd_with_momentum .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB13_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB13_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $fill_u8, 
%esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $fill_u32, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $u8_to_f32, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $u8_to_one_hot_f32, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $broadcast, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $broadcast_backward, %esi movl $.L__unnamed_6, %edx movl $.L__unnamed_6, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $add, %esi movl $.L__unnamed_7, %edx movl $.L__unnamed_7, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $cross_entropy_forward, %esi movl $.L__unnamed_8, %edx movl $.L__unnamed_8, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $cross_entropy_backward, %esi movl $.L__unnamed_9, %edx movl $.L__unnamed_9, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_sum_partial, %esi movl $.L__unnamed_10, %edx movl 
$.L__unnamed_10, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reduce_sum_final, %esi movl $.L__unnamed_11, %edx movl $.L__unnamed_11, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $reverse_conv_filter, %esi movl $.L__unnamed_12, %edx movl $.L__unnamed_12, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $sgd_with_momentum, %esi movl $.L__unnamed_13, %edx movl $.L__unnamed_13, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end13: .size __hip_module_ctor, .Lfunc_end13-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB14_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB14_2: retq .Lfunc_end14: .size __hip_module_dtor, .Lfunc_end14-__hip_module_dtor .cfi_endproc # -- End function .type fill_u8,@object # @fill_u8 .section .rodata,"a",@progbits .globl fill_u8 .p2align 3, 0x0 fill_u8: .quad __device_stub__fill_u8 .size fill_u8, 8 .type fill_u32,@object # @fill_u32 .globl fill_u32 .p2align 3, 0x0 fill_u32: .quad __device_stub__fill_u32 .size fill_u32, 8 .type u8_to_f32,@object # @u8_to_f32 .globl u8_to_f32 .p2align 3, 0x0 u8_to_f32: .quad __device_stub__u8_to_f32 .size u8_to_f32, 8 .type u8_to_one_hot_f32,@object # @u8_to_one_hot_f32 .globl u8_to_one_hot_f32 .p2align 3, 0x0 u8_to_one_hot_f32: .quad 
__device_stub__u8_to_one_hot_f32 .size u8_to_one_hot_f32, 8 .type broadcast,@object # @broadcast .globl broadcast .p2align 3, 0x0 broadcast: .quad __device_stub__broadcast .size broadcast, 8 .type broadcast_backward,@object # @broadcast_backward .globl broadcast_backward .p2align 3, 0x0 broadcast_backward: .quad __device_stub__broadcast_backward .size broadcast_backward, 8 .type add,@object # @add .globl add .p2align 3, 0x0 add: .quad __device_stub__add .size add, 8 .type cross_entropy_forward,@object # @cross_entropy_forward .globl cross_entropy_forward .p2align 3, 0x0 cross_entropy_forward: .quad __device_stub__cross_entropy_forward .size cross_entropy_forward, 8 .type cross_entropy_backward,@object # @cross_entropy_backward .globl cross_entropy_backward .p2align 3, 0x0 cross_entropy_backward: .quad __device_stub__cross_entropy_backward .size cross_entropy_backward, 8 .type reduce_sum_partial,@object # @reduce_sum_partial .globl reduce_sum_partial .p2align 3, 0x0 reduce_sum_partial: .quad __device_stub__reduce_sum_partial .size reduce_sum_partial, 8 .type reduce_sum_final,@object # @reduce_sum_final .globl reduce_sum_final .p2align 3, 0x0 reduce_sum_final: .quad __device_stub__reduce_sum_final .size reduce_sum_final, 8 .type reverse_conv_filter,@object # @reverse_conv_filter .globl reverse_conv_filter .p2align 3, 0x0 reverse_conv_filter: .quad __device_stub__reverse_conv_filter .size reverse_conv_filter, 8 .type sgd_with_momentum,@object # @sgd_with_momentum .globl sgd_with_momentum .p2align 3, 0x0 sgd_with_momentum: .quad __device_stub__sgd_with_momentum .size sgd_with_momentum, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "fill_u8" .size .L__unnamed_1, 8 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "fill_u32" .size .L__unnamed_2, 9 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "u8_to_f32" .size .L__unnamed_3, 10 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "u8_to_one_hot_f32" 
.size .L__unnamed_4, 18 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "broadcast" .size .L__unnamed_5, 10 .type .L__unnamed_6,@object # @5 .L__unnamed_6: .asciz "broadcast_backward" .size .L__unnamed_6, 19 .type .L__unnamed_7,@object # @6 .L__unnamed_7: .asciz "add" .size .L__unnamed_7, 4 .type .L__unnamed_8,@object # @7 .L__unnamed_8: .asciz "cross_entropy_forward" .size .L__unnamed_8, 22 .type .L__unnamed_9,@object # @8 .L__unnamed_9: .asciz "cross_entropy_backward" .size .L__unnamed_9, 23 .type .L__unnamed_10,@object # @9 .L__unnamed_10: .asciz "reduce_sum_partial" .size .L__unnamed_10, 19 .type .L__unnamed_11,@object # @10 .L__unnamed_11: .asciz "reduce_sum_final" .size .L__unnamed_11, 17 .type .L__unnamed_12,@object # @11 .L__unnamed_12: .asciz "reverse_conv_filter" .size .L__unnamed_12, 20 .type .L__unnamed_13,@object # @12 .L__unnamed_13: .asciz "sgd_with_momentum" .size .L__unnamed_13, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__fill_u8 .addrsig_sym __device_stub__fill_u32 .addrsig_sym __device_stub__u8_to_f32 .addrsig_sym __device_stub__u8_to_one_hot_f32 .addrsig_sym __device_stub__broadcast .addrsig_sym __device_stub__broadcast_backward .addrsig_sym __device_stub__add .addrsig_sym 
__device_stub__cross_entropy_forward .addrsig_sym __device_stub__cross_entropy_backward .addrsig_sym __device_stub__reduce_sum_partial .addrsig_sym __device_stub__reduce_sum_final .addrsig_sym __device_stub__reverse_conv_filter .addrsig_sym __device_stub__sgd_with_momentum .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym fill_u8 .addrsig_sym fill_u32 .addrsig_sym u8_to_f32 .addrsig_sym u8_to_one_hot_f32 .addrsig_sym broadcast .addrsig_sym broadcast_backward .addrsig_sym add .addrsig_sym cross_entropy_forward .addrsig_sym cross_entropy_backward .addrsig_sym reduce_sum_partial .addrsig_sym reduce_sum_final .addrsig_sym reverse_conv_filter .addrsig_sym sgd_with_momentum .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
# include <stdio.h> # include <stdlib.h> // To use the exit function and malloc # include <string.h> /* * ============================================ * Find a word in a given string (CUDA version) * ============================================ * * Usage: find_word <word> <input_file> * * Given a word, load the first line of the input file and * search the word in it. This version uses a CUDA-enabled * graphics card. */ // Global constant # define NOT_FOUND (-1) # define THREADS_PER_BLOCK (128) // Function declaration int find_word_in_gpu(char *word, char *search_here); // ---------------------------------------------------------------------------- // Kernel definition void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. */ // 1. --- > Prepare for execution // Allocate shared memory for the result __shared__ int found_here[THREADS_PER_BLOCK]; // The starting position of each thread int start = (blockDim.x * blockIdx.x) + threadIdx.x; // The shared memory index for this thread int shared_idx = threadIdx.x; // 2. --- > Search the word if (start < ref_length-1) { // Check for a valid position int found = 1; // Pretend you found it int letters_coincide; // ---> Check if the word is found from here for (int j=0; word[j] != '\0'; j++) { // Check if the letters coincide letters_coincide = (search_here[start+j] == word[j]); found = (found && letters_coincide); } // ---> Place your mark if (found) { // Place position if it was found found_here[shared_idx] = start; } else { found_here[shared_idx] = 0; } } else { // Non working thread, initialize shared memory // You will definitely NOT find it here found_here[start] = 0; } // Wait until everyone finishes __syncthreads(); // 3. 
--- > Reduce the result on every thread // ---> Reduce the results to one per block int threads_per_block = blockDim.x; int i = (threads_per_block+1)/2; while( i != 0 ) { // Reduce halving the results on each iteration if (threadIdx.x < i) { // Check if the entries are within reach if ( shared_idx + i < threads_per_block ) { // Check if it was found here found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]); } } // Prepare the next reduction i/=2; __syncthreads(); } // 4. --- > Save the block's reduction and return if (threadIdx.x == 0) { result[blockIdx.x] = found_here[shared_idx]; } return; } // --- find_word_kernel // ---------------------------------------------------------------------------- /* --- << Main function >> --- */ int main(int argc, char *argv[]) { // 1. ---> Find the input file and the word to search char *search_here = argv[1]; char *word = argv[2]; // 2. ---> Search the word in the reference string int found_here = find_word_in_gpu(word, search_here); // 3. ---> Display the results if( found_here == NOT_FOUND ) { // The word was not found printf("Sorry, the word was not found in the reference string\n"); printf("Word: %s\nReference string: %s\n\n", word, search_here); } else { // The word was found printf("The word was found at position: %d\n", found_here); // Signal the position printf("Word: %s\nReference string: %s\n", word, search_here); printf(" "); for (int i=0; i < found_here-1; i++) printf(" "); printf("^\n\n"); } // 4. ---> Finish! return 0; } // --- main // ---------------------------------------------------------------------------- /* --- << Functions >> --- */ // --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- -- int find_word_in_gpu(char *word, char *search_here) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. Uses a CUDA-enabled graphics card. */ // 1. 
--- > Prepare the data in the CPU // Lookup the lengths of the words int word_length = strlen(word); int str_length = strlen(search_here); int found_here = NOT_FOUND; // Copy the word to the GPU char *word_tmp; cudaMallocManaged(&word_tmp, word_length * sizeof(char)); strcpy(word_tmp, word); // Copy the search_string to the GPU char *str_tmp; cudaMallocManaged(&str_tmp, str_length * sizeof(char)); strcpy(str_tmp, search_here); // 2. --- > Prepare and launch the Kernel // Calculate the total threads to use (one per window) int total_threads = (str_length - word_length) + 1; // Calculate the blocks needed for that int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks); // Prepare for the arrival of the results int *partial_results; cudaMallocManaged(&partial_results, blocks * sizeof(int)); for (int i=0; i < blocks; i++) { partial_results[i] = 0; } // Launch the kernel find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results); cudaDeviceSynchronize(); // 3. --- > Analyze the result for (int i=0; i<blocks; i++) { if ( partial_results[i] ) { found_here = partial_results[i]; break; } } // 4. ---> Cleanup and return // Free unneeded memory cudaFree(partial_results); cudaFree(word_tmp); cudaFree(str_tmp); return found_here; } // --- find_word_in_gpu
code for sm_80 Function : _Z16find_word_kernelPcS_iPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ BSSY B0, 0x280 ; /* 0x0000024000007945 */ /* 0x000fe20003800000 */ /*0040*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */ /* 0x000fe2000fffe03f */ /*0050*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe20000000a00 */ /*0070*/ IMAD R0, R8, c[0x0][0x0], R11 ; /* 0x0000000008007a24 */ /* 0x001fe400078e020b */ /*0080*/ IMAD.SHL.U32 R6, R11, 0x4, RZ ; /* 0x000000040b067824 */ /* 0x000fc600078e00ff */ /*0090*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06270 */ /*00a0*/ @P0 STS [R0.X4], RZ ; /* 0x000000ff00000388 */ /* 0x0001e20000004800 */ /*00b0*/ @P0 BRA 0x270 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */ /* 0x000fe400078e00ff */ /*00d0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */ /* 0x000fca00078e00ff */ /*00e0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000802027981 */ /* 0x000ea2000c1e1100 */ /*00f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0100*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x004fda0003f25270 */ /*0110*/ @!P1 BRA 0x250 ; /* 0x0000013000009947 */ /* 0x000fea0003800000 */ /*0120*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0f070 */ /*0130*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0140*/ PRMT R7, R2, 0x7610, R7 ; /* 0x0000761002077816 */ /* 0x000fe40000000007 
*/ /*0150*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe20000000a00 */ /*0160*/ IADD3 R3, R0, UR4, RZ ; /* 0x0000000400037c10 */ /* 0x000fe2000fffe0ff */ /*0170*/ UIADD3 UR5, UP0, UR4, UR6, URZ ; /* 0x0000000604057290 */ /* 0x000fe2000ff1e03f */ /*0180*/ PRMT R9, R7, 0x9910, RZ ; /* 0x0000991007097816 */ /* 0x000fe400000000ff */ /*0190*/ IADD3 R2, P1, R3.reuse, c[0x0][0x168], RZ ; /* 0x00005a0003027a10 */ /* 0x040fe20007f3e0ff */ /*01a0*/ ULEA.HI.X.SX32 UR6, UR4, UR7, 0x1, UP0 ; /* 0x0000000704067291 */ /* 0x000fe400080f0e3f */ /*01b0*/ IMAD.U32 R4, RZ, RZ, UR5 ; /* 0x00000005ff047e24 */ /* 0x000fe2000f8e00ff */ /*01c0*/ LEA.HI.X.SX32 R3, R3, c[0x0][0x16c], 0x1, P1 ; /* 0x00005b0003037a11 */ /* 0x000fc600008f0eff */ /*01d0*/ IMAD.U32 R5, RZ, RZ, UR6 ; /* 0x00000006ff057e24 */ /* 0x000fe4000f8e00ff */ /*01e0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000802027981 */ /* 0x000ea8000c1e1100 */ /*01f0*/ LDG.E.U8 R7, [R4.64+0x1] ; /* 0x0000010804077981 */ /* 0x000ee2000c1e1100 */ /*0200*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */ /* 0x000fe2000fffe03f */ /*0210*/ ISETP.EQ.AND P0, PT, R2, R9, P0 ; /* 0x000000090200720c */ /* 0x004fe40000702270 */ /*0220*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x008fda0003f25270 */ /*0230*/ @P1 BRA 0x150 ; /* 0xffffff1000001947 */ /* 0x000fea000383ffff */ /*0240*/ PLOP3.LUT P0, PT, P0, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda000070e170 */ /*0250*/ @!P0 STS [R11.X4], R0 ; /* 0x000000000b008388 */ /* 0x0003e80000004800 */ /*0260*/ @P0 STS [R11.X4], RZ ; /* 0x000000ff0b000388 */ /* 0x0003e40000004800 */ /*0270*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0280*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */ /* 0x003fe200078e00ff */ /*0290*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*02a0*/ ISETP.NE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fc60003f25270 */ /*02b0*/ IADD3 R2, R0, 0x2, RZ ; /* 
0x0000000200027810 */ /* 0x000fc80007ffe0ff */ /*02c0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fda0003f06070 */ /*02d0*/ @!P0 BRA 0x3e0 ; /* 0x0000010000008947 */ /* 0x000fea0003800000 */ /*02e0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */ /* 0x000fc80007ffe0ff */ /*02f0*/ LEA.HI R0, R0, R0, RZ, 0x1 ; /* 0x0000000000007211 */ /* 0x000fc800078f08ff */ /*0300*/ SHF.R.S32.HI R0, RZ, 0x1, R0 ; /* 0x00000001ff007819 */ /* 0x000fca0000011400 */ /*0310*/ IMAD.IADD R2, R0, 0x1, R11 ; /* 0x0000000100027824 */ /* 0x000fca00078e020b */ /*0320*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */ /* 0x000fc80003f06270 */ /*0330*/ ISETP.GE.U32.OR P0, PT, R11, R0, P0 ; /* 0x000000000b00720c */ /* 0x000fda0000706470 */ /*0340*/ @!P0 LDS R2, [R11.X4] ; /* 0x000000000b028984 */ /* 0x000e240000004800 */ /*0350*/ @!P0 ISETP.NE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200820c */ /* 0x001fe40003f45270 */ /*0360*/ IADD3 R2, R0.reuse, 0x1, RZ ; /* 0x0000000100027810 */ /* 0x040fe40007ffe0ff */ /*0370*/ @!P0 SEL R3, R0, RZ, !P2 ; /* 0x000000ff00038207 */ /* 0x000fca0005000000 */ /*0380*/ @!P0 IMAD R3, R3, 0x4, R6 ; /* 0x0000000403038824 */ /* 0x000fcc00078e0206 */ /*0390*/ @!P0 LDS R3, [R3] ; /* 0x0000000003038984 */ /* 0x000e280000000800 */ /*03a0*/ @!P0 STS [R11.X4], R3 ; /* 0x000000030b008388 */ /* 0x0011e80000004800 */ /*03b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*03c0*/ ISETP.GT.U32.AND P0, PT, R2, 0x2, PT ; /* 0x000000020200780c */ /* 0x000fda0003f04070 */ /*03d0*/ @P0 BRA 0x2f0 ; /* 0xffffff1000000947 */ /* 0x001fea000383ffff */ /*03e0*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*03f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e220000000800 */ /*0400*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0410*/ IMAD.WIDE.U32 R2, R8, R3, c[0x0][0x178] ; /* 0x00005e0008027625 */ /* 0x000fca00078e0003 */ /*0420*/ STG.E [R2.64], R5 ; 
/* 0x0000000502007986 */ /* 0x001fe2000c101908 */ /*0430*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0440*/ BRA 0x440; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0480*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0490*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
# include <stdio.h> # include <stdlib.h> // To use the exit function and malloc # include <string.h> /* * ============================================ * Find a word in a given string (CUDA version) * ============================================ * * Usage: find_word <word> <input_file> * * Given a word, load the first line of the input file and * search the word in it. This version uses a CUDA-enabled * graphics card. */ // Global constant # define NOT_FOUND (-1) # define THREADS_PER_BLOCK (128) // Function declaration int find_word_in_gpu(char *word, char *search_here); // ---------------------------------------------------------------------------- // Kernel definition void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. */ // 1. --- > Prepare for execution // Allocate shared memory for the result __shared__ int found_here[THREADS_PER_BLOCK]; // The starting position of each thread int start = (blockDim.x * blockIdx.x) + threadIdx.x; // The shared memory index for this thread int shared_idx = threadIdx.x; // 2. --- > Search the word if (start < ref_length-1) { // Check for a valid position int found = 1; // Pretend you found it int letters_coincide; // ---> Check if the word is found from here for (int j=0; word[j] != '\0'; j++) { // Check if the letters coincide letters_coincide = (search_here[start+j] == word[j]); found = (found && letters_coincide); } // ---> Place your mark if (found) { // Place position if it was found found_here[shared_idx] = start; } else { found_here[shared_idx] = 0; } } else { // Non working thread, initialize shared memory // You will definitely NOT find it here found_here[start] = 0; } // Wait until everyone finishes __syncthreads(); // 3. 
--- > Reduce the result on every thread // ---> Reduce the results to one per block int threads_per_block = blockDim.x; int i = (threads_per_block+1)/2; while( i != 0 ) { // Reduce halving the results on each iteration if (threadIdx.x < i) { // Check if the entries are within reach if ( shared_idx + i < threads_per_block ) { // Check if it was found here found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]); } } // Prepare the next reduction i/=2; __syncthreads(); } // 4. --- > Save the block's reduction and return if (threadIdx.x == 0) { result[blockIdx.x] = found_here[shared_idx]; } return; } // --- find_word_kernel // ---------------------------------------------------------------------------- /* --- << Main function >> --- */ int main(int argc, char *argv[]) { // 1. ---> Find the input file and the word to search char *search_here = argv[1]; char *word = argv[2]; // 2. ---> Search the word in the reference string int found_here = find_word_in_gpu(word, search_here); // 3. ---> Display the results if( found_here == NOT_FOUND ) { // The word was not found printf("Sorry, the word was not found in the reference string\n"); printf("Word: %s\nReference string: %s\n\n", word, search_here); } else { // The word was found printf("The word was found at position: %d\n", found_here); // Signal the position printf("Word: %s\nReference string: %s\n", word, search_here); printf(" "); for (int i=0; i < found_here-1; i++) printf(" "); printf("^\n\n"); } // 4. ---> Finish! return 0; } // --- main // ---------------------------------------------------------------------------- /* --- << Functions >> --- */ // --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- -- int find_word_in_gpu(char *word, char *search_here) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. Uses a CUDA-enabled graphics card. */ // 1. 
--- > Prepare the data in the CPU // Lookup the lengths of the words int word_length = strlen(word); int str_length = strlen(search_here); int found_here = NOT_FOUND; // Copy the word to the GPU char *word_tmp; cudaMallocManaged(&word_tmp, word_length * sizeof(char)); strcpy(word_tmp, word); // Copy the search_string to the GPU char *str_tmp; cudaMallocManaged(&str_tmp, str_length * sizeof(char)); strcpy(str_tmp, search_here); // 2. --- > Prepare and launch the Kernel // Calculate the total threads to use (one per window) int total_threads = (str_length - word_length) + 1; // Calculate the blocks needed for that int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks); // Prepare for the arrival of the results int *partial_results; cudaMallocManaged(&partial_results, blocks * sizeof(int)); for (int i=0; i < blocks; i++) { partial_results[i] = 0; } // Launch the kernel find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results); cudaDeviceSynchronize(); // 3. --- > Analyze the result for (int i=0; i<blocks; i++) { if ( partial_results[i] ) { found_here = partial_results[i]; break; } } // 4. ---> Cleanup and return // Free unneeded memory cudaFree(partial_results); cudaFree(word_tmp); cudaFree(str_tmp); return found_here; } // --- find_word_in_gpu
.file "tmpxft_000f4f9d_00000000-6_find_word_shared.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi .type _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi, @function _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi: .LFB2083: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16find_word_kernelPcS_iPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi, .-_Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi .globl _Z16find_word_kernelPcS_iPi .type _Z16find_word_kernelPcS_iPi, @function _Z16find_word_kernelPcS_iPi: .LFB2084: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z16find_word_kernelPcS_iPi, .-_Z16find_word_kernelPcS_iPi .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Launching %d threads in %d blocks\n" .text .globl _Z16find_word_in_gpuPcS_ .type _Z16find_word_in_gpuPcS_, @function _Z16find_word_in_gpuPcS_: .LFB2058: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %rdi, %r14 movq %rsi, %r13 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax call strlen@PLT movq %rax, %rbx movq %r13, %rdi call strlen@PLT movq %rax, %rbp movl %eax, %r12d movslq %ebx, %rsi leaq 8(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT movq %r14, %rsi movq 8(%rsp), %rdi call strcpy@PLT movslq %ebp, %rsi leaq 16(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT movq %r13, %rsi movq 16(%rsp), %rdi call strcpy@PLT subl %ebx, %ebp leal 255(%rbp), %ebx movl %ebp, %eax subl $-128, %eax cmovns %eax, %ebx sarl $7, %ebx movl %ebx, %ecx movl $128, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebx, %rsi salq $2, %rsi leaq 24(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT testl %ebp, %ebp js .L12 movl $0, %eax .L13: movq 24(%rsp), %rdx movl $0, (%rdx,%rax,4) addq $1, %rax cmpl %eax, %ebx jg .L13 movl $128, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl %ebx, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 call cudaDeviceSynchronize@PLT .L19: movq 24(%rsp), %rdx movl $0, %eax .L16: movl 
(%rdx,%rax,4), %ebp testl %ebp, %ebp jne .L15 addq $1, %rax cmpl %eax, %ebx jg .L16 movl $-1, %ebp .L15: movq 24(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L26 movl %ebp, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L17: .cfi_restore_state call cudaDeviceSynchronize@PLT movl $-1, %ebp jmp .L15 .L12: movl $128, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl %ebx, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L17 .L18: movq 24(%rsp), %rcx movl %r12d, %edx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi call cudaDeviceSynchronize@PLT testl %ebp, %ebp jns .L19 movl $-1, %ebp jmp .L15 .L26: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z16find_word_in_gpuPcS_, .-_Z16find_word_in_gpuPcS_ .section .rodata.str1.8 .align 8 .LC1: .string "Sorry, the word was not found in the reference string\n" .align 8 .LC2: .string "Word: %s\nReference string: %s\n\n" .align 8 .LC3: .string "The word was found at position: %d\n" .align 8 .LC4: .string "Word: %s\nReference string: %s\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string " " .LC6: .string " " .LC7: .string "^\n\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq 8(%rsi), %r12 movq 16(%rsi), %rbx movq %r12, %rsi movq %rbx, %rdi call _Z16find_word_in_gpuPcS_ cmpl $-1, %eax je .L34 movl %eax, %ebp movl %eax, %edx leaq .LC3(%rip), 
%rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r12, %rcx movq %rbx, %rdx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $1, %ebp jle .L30 subl $1, %ebp movl $0, %ebx leaq .LC6(%rip), %r12 .L31: movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebp, %ebx jne .L31 .L30: leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L29: movl $0, %eax popq %rbx .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r12, %rcx movq %rbx, %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L29 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z16find_word_kernelPcS_iPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z16find_word_kernelPcS_iPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size 
_ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
# include <stdio.h> # include <stdlib.h> // To use the exit function and malloc # include <string.h> /* * ============================================ * Find a word in a given string (CUDA version) * ============================================ * * Usage: find_word <word> <input_file> * * Given a word, load the first line of the input file and * search the word in it. This version uses a CUDA-enabled * graphics card. */ // Global constant # define NOT_FOUND (-1) # define THREADS_PER_BLOCK (128) // Function declaration int find_word_in_gpu(char *word, char *search_here); // ---------------------------------------------------------------------------- // Kernel definition void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. */ // 1. --- > Prepare for execution // Allocate shared memory for the result __shared__ int found_here[THREADS_PER_BLOCK]; // The starting position of each thread int start = (blockDim.x * blockIdx.x) + threadIdx.x; // The shared memory index for this thread int shared_idx = threadIdx.x; // 2. --- > Search the word if (start < ref_length-1) { // Check for a valid position int found = 1; // Pretend you found it int letters_coincide; // ---> Check if the word is found from here for (int j=0; word[j] != '\0'; j++) { // Check if the letters coincide letters_coincide = (search_here[start+j] == word[j]); found = (found && letters_coincide); } // ---> Place your mark if (found) { // Place position if it was found found_here[shared_idx] = start; } else { found_here[shared_idx] = 0; } } else { // Non working thread, initialize shared memory // You will definitely NOT find it here found_here[start] = 0; } // Wait until everyone finishes __syncthreads(); // 3. 
--- > Reduce the result on every thread // ---> Reduce the results to one per block int threads_per_block = blockDim.x; int i = (threads_per_block+1)/2; while( i != 0 ) { // Reduce halving the results on each iteration if (threadIdx.x < i) { // Check if the entries are within reach if ( shared_idx + i < threads_per_block ) { // Check if it was found here found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]); } } // Prepare the next reduction i/=2; __syncthreads(); } // 4. --- > Save the block's reduction and return if (threadIdx.x == 0) { result[blockIdx.x] = found_here[shared_idx]; } return; } // --- find_word_kernel // ---------------------------------------------------------------------------- /* --- << Main function >> --- */ int main(int argc, char *argv[]) { // 1. ---> Find the input file and the word to search char *search_here = argv[1]; char *word = argv[2]; // 2. ---> Search the word in the reference string int found_here = find_word_in_gpu(word, search_here); // 3. ---> Display the results if( found_here == NOT_FOUND ) { // The word was not found printf("Sorry, the word was not found in the reference string\n"); printf("Word: %s\nReference string: %s\n\n", word, search_here); } else { // The word was found printf("The word was found at position: %d\n", found_here); // Signal the position printf("Word: %s\nReference string: %s\n", word, search_here); printf(" "); for (int i=0; i < found_here-1; i++) printf(" "); printf("^\n\n"); } // 4. ---> Finish! return 0; } // --- main // ---------------------------------------------------------------------------- /* --- << Functions >> --- */ // --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- -- int find_word_in_gpu(char *word, char *search_here) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. Uses a CUDA-enabled graphics card. */ // 1. 
--- > Prepare the data in the CPU // Lookup the lengths of the words int word_length = strlen(word); int str_length = strlen(search_here); int found_here = NOT_FOUND; // Copy the word to the GPU char *word_tmp; cudaMallocManaged(&word_tmp, word_length * sizeof(char)); strcpy(word_tmp, word); // Copy the search_string to the GPU char *str_tmp; cudaMallocManaged(&str_tmp, str_length * sizeof(char)); strcpy(str_tmp, search_here); // 2. --- > Prepare and launch the Kernel // Calculate the total threads to use (one per window) int total_threads = (str_length - word_length) + 1; // Calculate the blocks needed for that int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks); // Prepare for the arrival of the results int *partial_results; cudaMallocManaged(&partial_results, blocks * sizeof(int)); for (int i=0; i < blocks; i++) { partial_results[i] = 0; } // Launch the kernel find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results); cudaDeviceSynchronize(); // 3. --- > Analyze the result for (int i=0; i<blocks; i++) { if ( partial_results[i] ) { found_here = partial_results[i]; break; } } // 4. ---> Cleanup and return // Free unneeded memory cudaFree(partial_results); cudaFree(word_tmp); cudaFree(str_tmp); return found_here; } // --- find_word_in_gpu
#include <hip/hip_runtime.h> # include <stdio.h> # include <stdlib.h> // To use the exit function and malloc # include <string.h> /* * ============================================ * Find a word in a given string (CUDA version) * ============================================ * * Usage: find_word <word> <input_file> * * Given a word, load the first line of the input file and * search the word in it. This version uses a CUDA-enabled * graphics card. */ // Global constant # define NOT_FOUND (-1) # define THREADS_PER_BLOCK (128) // Function declaration int find_word_in_gpu(char *word, char *search_here); // ---------------------------------------------------------------------------- // Kernel definition void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. */ // 1. --- > Prepare for execution // Allocate shared memory for the result __shared__ int found_here[THREADS_PER_BLOCK]; // The starting position of each thread int start = (blockDim.x * blockIdx.x) + threadIdx.x; // The shared memory index for this thread int shared_idx = threadIdx.x; // 2. --- > Search the word if (start < ref_length-1) { // Check for a valid position int found = 1; // Pretend you found it int letters_coincide; // ---> Check if the word is found from here for (int j=0; word[j] != '\0'; j++) { // Check if the letters coincide letters_coincide = (search_here[start+j] == word[j]); found = (found && letters_coincide); } // ---> Place your mark if (found) { // Place position if it was found found_here[shared_idx] = start; } else { found_here[shared_idx] = 0; } } else { // Non working thread, initialize shared memory // You will definitely NOT find it here found_here[start] = 0; } // Wait until everyone finishes __syncthreads(); // 3. 
--- > Reduce the result on every thread // ---> Reduce the results to one per block int threads_per_block = blockDim.x; int i = (threads_per_block+1)/2; while( i != 0 ) { // Reduce halving the results on each iteration if (threadIdx.x < i) { // Check if the entries are within reach if ( shared_idx + i < threads_per_block ) { // Check if it was found here found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]); } } // Prepare the next reduction i/=2; __syncthreads(); } // 4. --- > Save the block's reduction and return if (threadIdx.x == 0) { result[blockIdx.x] = found_here[shared_idx]; } return; } // --- find_word_kernel // ---------------------------------------------------------------------------- /* --- << Main function >> --- */ int main(int argc, char *argv[]) { // 1. ---> Find the input file and the word to search char *search_here = argv[1]; char *word = argv[2]; // 2. ---> Search the word in the reference string int found_here = find_word_in_gpu(word, search_here); // 3. ---> Display the results if( found_here == NOT_FOUND ) { // The word was not found printf("Sorry, the word was not found in the reference string\n"); printf("Word: %s\nReference string: %s\n\n", word, search_here); } else { // The word was found printf("The word was found at position: %d\n", found_here); // Signal the position printf("Word: %s\nReference string: %s\n", word, search_here); printf(" "); for (int i=0; i < found_here-1; i++) printf(" "); printf("^\n\n"); } // 4. ---> Finish! return 0; } // --- main // ---------------------------------------------------------------------------- /* --- << Functions >> --- */ // --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- -- int find_word_in_gpu(char *word, char *search_here) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. Uses a CUDA-enabled graphics card. */ // 1. 
--- > Prepare the data in the CPU // Lookup the lengths of the words int word_length = strlen(word); int str_length = strlen(search_here); int found_here = NOT_FOUND; // Copy the word to the GPU char *word_tmp; hipMallocManaged(&word_tmp, word_length * sizeof(char)); strcpy(word_tmp, word); // Copy the search_string to the GPU char *str_tmp; hipMallocManaged(&str_tmp, str_length * sizeof(char)); strcpy(str_tmp, search_here); // 2. --- > Prepare and launch the Kernel // Calculate the total threads to use (one per window) int total_threads = (str_length - word_length) + 1; // Calculate the blocks needed for that int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks); // Prepare for the arrival of the results int *partial_results; hipMallocManaged(&partial_results, blocks * sizeof(int)); for (int i=0; i < blocks; i++) { partial_results[i] = 0; } // Launch the kernel find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results); hipDeviceSynchronize(); // 3. --- > Analyze the result for (int i=0; i<blocks; i++) { if ( partial_results[i] ) { found_here = partial_results[i]; break; } } // 4. ---> Cleanup and return // Free unneeded memory hipFree(partial_results); hipFree(word_tmp); hipFree(str_tmp); return found_here; } // --- find_word_in_gpu
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> # include <stdio.h> # include <stdlib.h> // To use the exit function and malloc # include <string.h> /* * ============================================ * Find a word in a given string (CUDA version) * ============================================ * * Usage: find_word <word> <input_file> * * Given a word, load the first line of the input file and * search the word in it. This version uses a CUDA-enabled * graphics card. */ // Global constant # define NOT_FOUND (-1) # define THREADS_PER_BLOCK (128) // Function declaration int find_word_in_gpu(char *word, char *search_here); // ---------------------------------------------------------------------------- // Kernel definition void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. */ // 1. --- > Prepare for execution // Allocate shared memory for the result __shared__ int found_here[THREADS_PER_BLOCK]; // The starting position of each thread int start = (blockDim.x * blockIdx.x) + threadIdx.x; // The shared memory index for this thread int shared_idx = threadIdx.x; // 2. --- > Search the word if (start < ref_length-1) { // Check for a valid position int found = 1; // Pretend you found it int letters_coincide; // ---> Check if the word is found from here for (int j=0; word[j] != '\0'; j++) { // Check if the letters coincide letters_coincide = (search_here[start+j] == word[j]); found = (found && letters_coincide); } // ---> Place your mark if (found) { // Place position if it was found found_here[shared_idx] = start; } else { found_here[shared_idx] = 0; } } else { // Non working thread, initialize shared memory // You will definitely NOT find it here found_here[start] = 0; } // Wait until everyone finishes __syncthreads(); // 3. 
--- > Reduce the result on every thread // ---> Reduce the results to one per block int threads_per_block = blockDim.x; int i = (threads_per_block+1)/2; while( i != 0 ) { // Reduce halving the results on each iteration if (threadIdx.x < i) { // Check if the entries are within reach if ( shared_idx + i < threads_per_block ) { // Check if it was found here found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]); } } // Prepare the next reduction i/=2; __syncthreads(); } // 4. --- > Save the block's reduction and return if (threadIdx.x == 0) { result[blockIdx.x] = found_here[shared_idx]; } return; } // --- find_word_kernel // ---------------------------------------------------------------------------- /* --- << Main function >> --- */ int main(int argc, char *argv[]) { // 1. ---> Find the input file and the word to search char *search_here = argv[1]; char *word = argv[2]; // 2. ---> Search the word in the reference string int found_here = find_word_in_gpu(word, search_here); // 3. ---> Display the results if( found_here == NOT_FOUND ) { // The word was not found printf("Sorry, the word was not found in the reference string\n"); printf("Word: %s\nReference string: %s\n\n", word, search_here); } else { // The word was found printf("The word was found at position: %d\n", found_here); // Signal the position printf("Word: %s\nReference string: %s\n", word, search_here); printf(" "); for (int i=0; i < found_here-1; i++) printf(" "); printf("^\n\n"); } // 4. ---> Finish! return 0; } // --- main // ---------------------------------------------------------------------------- /* --- << Functions >> --- */ // --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- -- int find_word_in_gpu(char *word, char *search_here) { /* * Search for the given word in the search_here string. * * At first occurrence, returns the starting position. If the word was not * found, return NOT_FOUND. Uses a CUDA-enabled graphics card. */ // 1. 
--- > Prepare the data in the CPU // Lookup the lengths of the words int word_length = strlen(word); int str_length = strlen(search_here); int found_here = NOT_FOUND; // Copy the word to the GPU char *word_tmp; hipMallocManaged(&word_tmp, word_length * sizeof(char)); strcpy(word_tmp, word); // Copy the search_string to the GPU char *str_tmp; hipMallocManaged(&str_tmp, str_length * sizeof(char)); strcpy(str_tmp, search_here); // 2. --- > Prepare and launch the Kernel // Calculate the total threads to use (one per window) int total_threads = (str_length - word_length) + 1; // Calculate the blocks needed for that int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks); // Prepare for the arrival of the results int *partial_results; hipMallocManaged(&partial_results, blocks * sizeof(int)); for (int i=0; i < blocks; i++) { partial_results[i] = 0; } // Launch the kernel find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results); hipDeviceSynchronize(); // 3. --- > Analyze the result for (int i=0; i<blocks; i++) { if ( partial_results[i] ) { found_here = partial_results[i]; break; } } // 4. ---> Cleanup and return // Free unneeded memory hipFree(partial_results); hipFree(word_tmp); hipFree(str_tmp); return found_here; } // --- find_word_in_gpu
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z16find_word_kernelPcS_iPi .globl _Z16find_word_kernelPcS_iPi .p2align 8 .type _Z16find_word_kernelPcS_iPi,@function _Z16find_word_kernelPcS_iPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x10 s_mov_b32 s4, s15 s_mov_b32 s10, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s5, s2, 0xffff s_add_i32 s3, s3, -1 v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_2) v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_6 s_load_b64 s[2:3], s[0:1], 0x0 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_load_u8 v3, v2, s[2:3] s_waitcnt vmcnt(0) v_cmp_eq_u16_e32 vcc_lo, 0, v3 s_cbranch_vccnz .LBB0_5 s_load_b64 s[6:7], s[0:1], 0x8 v_mov_b32_e32 v4, v1 s_add_u32 s8, s2, 1 s_addc_u32 s9, s3, 0 s_mov_b32 s3, -1 .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v4 s_waitcnt lgkmcnt(0) v_add_co_u32 v5, vcc_lo, s6, v4 v_and_b32_e32 v7, 0xff, v3 v_add_nc_u32_e32 v4, 1, v4 v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo global_load_u8 v5, v[5:6], off global_load_u8 v3, v2, s[8:9] s_waitcnt vmcnt(1) v_cmp_eq_u16_e32 vcc_lo, v5, v7 s_waitcnt vmcnt(0) v_cmp_eq_u16_e64 s2, 0, v3 s_and_b32 s3, s3, vcc_lo s_add_u32 s8, s8, 1 s_addc_u32 s9, s9, 0 s_and_not1_b32 s11, s11, exec_lo s_and_b32 s12, s3, exec_lo s_and_b32 vcc_lo, exec_lo, s2 s_or_b32 s11, s11, s12 s_cbranch_vccz .LBB0_3 v_cndmask_b32_e64 v1, 0, v1, s11 .LBB0_5: s_delay_alu instid0(VALU_DEP_1) v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, v0 .LBB0_6: s_or_b32 exec_lo, exec_lo, s10 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v1, 2, v1 s_cmp_eq_u32 s5, 0 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_14 v_lshlrev_b32_e32 v1, 2, v0 s_add_i32 s2, s5, 1 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_10 .p2align 6 .LBB0_8: s_or_b32 exec_lo, exec_lo, s7 s_waitcnt lgkmcnt(0) ds_store_b32 v1, v3 .LBB0_9: s_or_b32 exec_lo, exec_lo, s6 s_cmp_gt_u32 s3, 3 s_waitcnt 
lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_14 .LBB0_10: s_mov_b32 s3, s2 s_lshr_b32 s2, s2, 1 s_mov_b32 s6, exec_lo v_cmpx_gt_u32_e64 s2, v0 s_cbranch_execz .LBB0_9 v_add_nc_u32_e32 v2, s2, v0 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u32_e32 vcc_lo, s5, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_9 ds_load_b32 v3, v1 s_mov_b32 s7, exec_lo s_waitcnt lgkmcnt(0) v_cmpx_eq_u32_e32 0, v3 s_cbranch_execz .LBB0_8 v_lshlrev_b32_e32 v2, 2, v2 ds_load_b32 v3, v2 s_branch .LBB0_8 .LBB0_14: s_set_inst_prefetch_distance 0x2 s_mov_b32 s2, exec_lo v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_16 v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x18 s_mov_b32 s5, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[4:5], 2 ds_load_b32 v0, v0 s_waitcnt lgkmcnt(0) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 global_store_b32 v1, v0, s[0:1] .LBB0_16: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16find_word_kernelPcS_iPi .amdhsa_group_segment_fixed_size 512 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z16find_word_kernelPcS_iPi, .Lfunc_end0-_Z16find_word_kernelPcS_iPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 512 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 
.name: _Z16find_word_kernelPcS_iPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16find_word_kernelPcS_iPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // To use the exit function and malloc
#include <string.h>

/*
 * ============================================
 * Find a word in a given string (CUDA version)
 * ============================================
 *
 * Usage: find_word <word> <input_file>
 *
 * Given a word, load the first line of the input file and
 * search the word in it. This version uses a CUDA-enabled
 * graphics card.
 */

// Global constants
#define NOT_FOUND         (-1)
#define THREADS_PER_BLOCK (128)

// Function declaration
int find_word_in_gpu(char *word, char *search_here);

// ----------------------------------------------------------------------------
// Kernel definition
void __global__ find_word_kernel(char *word, char *search_here,
                                 int ref_length, int *result)
{
    /*
     * Search for the given word in the search_here string.
     *
     * Each thread checks one candidate starting position. Per-thread hits
     * are written to block-shared memory and reduced to one value per
     * block, which is stored in result[blockIdx.x].
     *
     * NOTE(review): a match at position 0 is indistinguishable from "not
     * found" because 0 doubles as the sentinel in found_here[]/result[] —
     * confirm with callers before relying on position-0 matches.
     */

    // 1. ---> Prepare for execution

    // Shared scratch: one slot per thread of this block
    __shared__ int found_here[THREADS_PER_BLOCK];

    // The starting position of each thread in the reference string
    int start = (blockDim.x * blockIdx.x) + threadIdx.x;

    // The shared memory index for this thread
    int shared_idx = threadIdx.x;

    // 2. ---> Search the word
    if (start < ref_length - 1) {  // Check for a valid position
        int found = 1;             // Pretend you found it
        int letters_coincide;

        // ---> Check if the word is found from here
        // NOTE(review): positions near the end of the string read
        // search_here[start+j] past the window; the host sizes the grid so
        // extra threads mostly fail the guard, but this can still over-read
        // the managed buffer for starts close to ref_length — TODO confirm.
        for (int j = 0; word[j] != '\0'; j++) {
            // Check if the letters coincide
            letters_coincide = (search_here[start + j] == word[j]);
            found = (found && letters_coincide);
        }

        // ---> Place your mark
        if (found) {
            // Place position if it was found
            found_here[shared_idx] = start;
        } else {
            found_here[shared_idx] = 0;
        }
    } else {
        // Non-working thread: initialize this thread's shared slot.
        // BUG FIX: was found_here[start] = 0 — 'start' can exceed
        // THREADS_PER_BLOCK-1, writing out of bounds of shared memory.
        // Index with shared_idx (the per-block thread id) instead.
        found_here[shared_idx] = 0;
    }

    // Wait until everyone finishes (barrier is reached by all threads)
    __syncthreads();

    // 3. ---> Reduce the result on every thread

    // ---> Reduce the results to one per block (tree reduction, halving
    //      the active range each pass; first hit wins via the ?: below)
    int threads_per_block = blockDim.x;
    int i = (threads_per_block + 1) / 2;
    while (i != 0) {
        // Reduce halving the results on each iteration
        if (threadIdx.x < i) {
            // Check if the entries are within reach
            if (shared_idx + i < threads_per_block) {
                // Keep this thread's hit if it has one, else take partner's
                found_here[shared_idx] = (found_here[shared_idx]
                                              ? found_here[shared_idx]
                                              : found_here[shared_idx + i]);
            }
        }
        // Prepare the next reduction (barrier outside the divergent branch)
        i /= 2;
        __syncthreads();
    }

    // 4. ---> Save the block's reduction and return
    if (threadIdx.x == 0) {
        result[blockIdx.x] = found_here[shared_idx];
    }
    return;
}  // --- find_word_kernel

// ----------------------------------------------------------------------------
/* --- << Main function >> --- */
int main(int argc, char *argv[])
{
    // 1. ---> Find the input file and the word to search
    char *search_here = argv[1];
    char *word = argv[2];

    // 2. ---> Search the word in the reference string
    int found_here = find_word_in_gpu(word, search_here);

    // 3. ---> Display the results
    if (found_here == NOT_FOUND) {
        // The word was not found
        printf("Sorry, the word was not found in the reference string\n");
        printf("Word: %s\nReference string: %s\n\n", word, search_here);
    } else {
        // The word was found
        printf("The word was found at position: %d\n", found_here);

        // Signal the position (pad past the "Reference string: " label)
        printf("Word: %s\nReference string: %s\n", word, search_here);
        printf("                   ");
        for (int i = 0; i < found_here - 1; i++)
            printf(" ");
        printf("^\n\n");
    }

    // 4. ---> Finish!
    return 0;
}  // --- main

// ----------------------------------------------------------------------------
/* --- << Functions >> --- */

// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
int find_word_in_gpu(char *word, char *search_here)
{
    /*
     * Search for the given word in the search_here string.
     *
     * At first occurrence, returns the starting position. If the word was
     * not found, return NOT_FOUND. Uses a CUDA-enabled graphics card.
     */

    // 1. ---> Prepare the data in the CPU

    // Look up the lengths of the words
    int word_length = strlen(word);
    int str_length = strlen(search_here);
    int found_here = NOT_FOUND;

    // Copy the word to the GPU.
    // BUG FIX: allocate word_length+1 bytes — strcpy also writes the
    // terminating NUL, which the original word_length allocation had no
    // room for (one-byte heap overflow).
    char *word_tmp;
    hipMallocManaged(&word_tmp, (word_length + 1) * sizeof(char));
    strcpy(word_tmp, word);

    // Copy the search string to the GPU.
    // BUG FIX: same off-by-one — include space for the NUL terminator.
    char *str_tmp;
    hipMallocManaged(&str_tmp, (str_length + 1) * sizeof(char));
    strcpy(str_tmp, search_here);

    // 2. ---> Prepare and launch the kernel

    // Calculate the total threads to use (one per window)
    int total_threads = (str_length - word_length) + 1;

    // Calculate the blocks needed for that (ceiling division)
    int blocks = (total_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks);

    // Prepare for the arrival of the results (one slot per block)
    int *partial_results;
    hipMallocManaged(&partial_results, blocks * sizeof(int));
    for (int i = 0; i < blocks; i++) {
        partial_results[i] = 0;
    }

    // Launch the kernel
    find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp,
                                                    str_length,
                                                    partial_results);
    // Managed memory: synchronize before the host reads the results
    hipDeviceSynchronize();

    // 3. ---> Analyze the result: first non-zero block result wins
    for (int i = 0; i < blocks; i++) {
        if (partial_results[i]) {
            found_here = partial_results[i];
            break;
        }
    }

    // 4. ---> Cleanup and return

    // Free unneeded memory
    hipFree(partial_results);
    hipFree(word_tmp);
    hipFree(str_tmp);

    return found_here;
}  // --- find_word_in_gpu
.text .file "find_word_shared.hip" .globl _Z31__device_stub__find_word_kernelPcS_iPi # -- Begin function _Z31__device_stub__find_word_kernelPcS_iPi .p2align 4, 0x90 .type _Z31__device_stub__find_word_kernelPcS_iPi,@function _Z31__device_stub__find_word_kernelPcS_iPi: # @_Z31__device_stub__find_word_kernelPcS_iPi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16find_word_kernelPcS_iPi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z31__device_stub__find_word_kernelPcS_iPi, .Lfunc_end0-_Z31__device_stub__find_word_kernelPcS_iPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq 8(%rsi), %r14 movq 16(%rsi), %r15 movq %r15, %rdi movq %r14, %rsi callq _Z16find_word_in_gpuPcS_ cmpl $-1, %eax je .LBB1_1 # %bb.2: movl %eax, %ebx movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movl $.L.str.3, %edi movq %r15, %rsi movq %r14, %rdx xorl %eax, %eax callq printf movl $.L.str.4, %edi xorl %eax, %eax callq printf cmpl $2, %ebx jl .LBB1_5 # %bb.3: # %.lr.ph.preheader decl %ebx .p2align 4, 0x90 .LBB1_4: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl $32, %edi callq putchar@PLT decl %ebx jne .LBB1_4 .LBB1_5: # 
%._crit_edge movl $.Lstr, %edi callq puts@PLT jmp .LBB1_6 .LBB1_1: movl $.Lstr.1, %edi callq puts@PLT movl $.L.str.1, %edi movq %r15, %rsi movq %r14, %rdx xorl %eax, %eax callq printf .LBB1_6: xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z16find_word_in_gpuPcS_ # -- Begin function _Z16find_word_in_gpuPcS_ .p2align 4, 0x90 .type _Z16find_word_in_gpuPcS_,@function _Z16find_word_in_gpuPcS_: # @_Z16find_word_in_gpuPcS_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %r12 movq %rdi, %rbx callq strlen movq %rax, %r15 movq %r12, %rdi callq strlen movq %rax, %r14 movslq %r15d, %r13 leaq 24(%rsp), %rdi movl $1, %r15d movq %r13, %rsi movl $1, %edx callq hipMallocManaged movq 24(%rsp), %rdi movq %rbx, %rsi callq strcpy movslq %r14d, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged movq 16(%rsp), %rdi movq %r12, %rsi callq strcpy subl %r13d, %ebx leal 128(%rbx), %eax leal 255(%rbx), %ebp testl %eax, %eax cmovnsl %eax, %ebp sarl $7, %ebp movl $.L.str.7, %edi movl $128, %esi movl %ebp, %edx xorl %eax, %eax callq printf movslq %ebp, %rsi shlq $2, %rsi leaq 8(%rsp), %rdi movl $1, %edx callq hipMallocManaged cmpl $2147483519, %ebx # imm = 0x7FFFFF7F ja .LBB2_2 # %bb.1: # %.lr.ph movq 8(%rsp), %rdi cmpl $2, %ebp cmovgel %ebp, %r15d shlq $2, %r15 xorl %esi, %esi movq %r15, %rdx callq memset@PLT .LBB2_2: # %._crit_edge movl %ebp, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $128, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, 
%r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %r14d, 36(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 88(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z16find_word_kernelPcS_iPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_4: callq hipDeviceSynchronize cmpl $2147483519, %ebx # imm = 0x7FFFFF7F ja .LBB2_9 # %bb.5: # %.lr.ph36 movq 8(%rsp), %rax cmpl $2, %ebp movl $1, %ecx cmovgel %ebp, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB2_6: # =>This Inner Loop Header: Depth=1 movl (%rax,%rdx,4), %ebx testl %ebx, %ebx jne .LBB2_10 # %bb.7: # in Loop: Header=BB2_6 Depth=1 incq %rdx cmpq %rdx, %rcx jne .LBB2_6 .LBB2_9: movl $-1, %ebx .LBB2_10: # %._crit_edge37 movq 8(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movl %ebx, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z16find_word_in_gpuPcS_, .Lfunc_end2-_Z16find_word_in_gpuPcS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16find_word_kernelPcS_iPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z16find_word_kernelPcS_iPi,@object # @_Z16find_word_kernelPcS_iPi .section .rodata,"a",@progbits .globl _Z16find_word_kernelPcS_iPi .p2align 3, 0x0 _Z16find_word_kernelPcS_iPi: .quad _Z31__device_stub__find_word_kernelPcS_iPi .size _Z16find_word_kernelPcS_iPi, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "Word: %s\nReference string: %s\n\n" .size .L.str.1, 32 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "The word was found at position: %d\n" .size .L.str.2, 36 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Word: %s\nReference string: %s\n" .size .L.str.3, 31 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " " .size .L.str.4, 20 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Launching %d threads in %d blocks\n" .size .L.str.7, 35 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z16find_word_kernelPcS_iPi" .size .L__unnamed_1, 28 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section 
.hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "^\n" .size .Lstr, 3 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Sorry, the word was not found in the reference string" .size .Lstr.1, 54 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z31__device_stub__find_word_kernelPcS_iPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z16find_word_kernelPcS_iPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z16find_word_kernelPcS_iPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ BSSY B0, 0x280 ; /* 0x0000024000007945 */ /* 0x000fe20003800000 */ /*0040*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */ /* 0x000fe2000fffe03f */ /*0050*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe20000000a00 */ /*0070*/ IMAD R0, R8, c[0x0][0x0], R11 ; /* 0x0000000008007a24 */ /* 0x001fe400078e020b */ /*0080*/ IMAD.SHL.U32 R6, R11, 0x4, RZ ; /* 0x000000040b067824 */ /* 0x000fc600078e00ff */ /*0090*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06270 */ /*00a0*/ @P0 STS [R0.X4], RZ ; /* 0x000000ff00000388 */ /* 0x0001e20000004800 */ /*00b0*/ @P0 BRA 0x270 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */ /* 0x000fe400078e00ff */ /*00d0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */ /* 0x000fca00078e00ff */ /*00e0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000802027981 */ /* 0x000ea2000c1e1100 */ /*00f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0100*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x004fda0003f25270 */ /*0110*/ @!P1 BRA 0x250 ; /* 0x0000013000009947 */ /* 0x000fea0003800000 */ /*0120*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0f070 */ /*0130*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0140*/ PRMT R7, R2, 0x7610, R7 ; /* 0x0000761002077816 */ /* 0x000fe40000000007 
*/ /*0150*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe20000000a00 */ /*0160*/ IADD3 R3, R0, UR4, RZ ; /* 0x0000000400037c10 */ /* 0x000fe2000fffe0ff */ /*0170*/ UIADD3 UR5, UP0, UR4, UR6, URZ ; /* 0x0000000604057290 */ /* 0x000fe2000ff1e03f */ /*0180*/ PRMT R9, R7, 0x9910, RZ ; /* 0x0000991007097816 */ /* 0x000fe400000000ff */ /*0190*/ IADD3 R2, P1, R3.reuse, c[0x0][0x168], RZ ; /* 0x00005a0003027a10 */ /* 0x040fe20007f3e0ff */ /*01a0*/ ULEA.HI.X.SX32 UR6, UR4, UR7, 0x1, UP0 ; /* 0x0000000704067291 */ /* 0x000fe400080f0e3f */ /*01b0*/ IMAD.U32 R4, RZ, RZ, UR5 ; /* 0x00000005ff047e24 */ /* 0x000fe2000f8e00ff */ /*01c0*/ LEA.HI.X.SX32 R3, R3, c[0x0][0x16c], 0x1, P1 ; /* 0x00005b0003037a11 */ /* 0x000fc600008f0eff */ /*01d0*/ IMAD.U32 R5, RZ, RZ, UR6 ; /* 0x00000006ff057e24 */ /* 0x000fe4000f8e00ff */ /*01e0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000802027981 */ /* 0x000ea8000c1e1100 */ /*01f0*/ LDG.E.U8 R7, [R4.64+0x1] ; /* 0x0000010804077981 */ /* 0x000ee2000c1e1100 */ /*0200*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */ /* 0x000fe2000fffe03f */ /*0210*/ ISETP.EQ.AND P0, PT, R2, R9, P0 ; /* 0x000000090200720c */ /* 0x004fe40000702270 */ /*0220*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x008fda0003f25270 */ /*0230*/ @P1 BRA 0x150 ; /* 0xffffff1000001947 */ /* 0x000fea000383ffff */ /*0240*/ PLOP3.LUT P0, PT, P0, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda000070e170 */ /*0250*/ @!P0 STS [R11.X4], R0 ; /* 0x000000000b008388 */ /* 0x0003e80000004800 */ /*0260*/ @P0 STS [R11.X4], RZ ; /* 0x000000ff0b000388 */ /* 0x0003e40000004800 */ /*0270*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0280*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */ /* 0x003fe200078e00ff */ /*0290*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*02a0*/ ISETP.NE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fc60003f25270 */ /*02b0*/ IADD3 R2, R0, 0x2, RZ ; /* 
0x0000000200027810 */ /* 0x000fc80007ffe0ff */ /*02c0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fda0003f06070 */ /*02d0*/ @!P0 BRA 0x3e0 ; /* 0x0000010000008947 */ /* 0x000fea0003800000 */ /*02e0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */ /* 0x000fc80007ffe0ff */ /*02f0*/ LEA.HI R0, R0, R0, RZ, 0x1 ; /* 0x0000000000007211 */ /* 0x000fc800078f08ff */ /*0300*/ SHF.R.S32.HI R0, RZ, 0x1, R0 ; /* 0x00000001ff007819 */ /* 0x000fca0000011400 */ /*0310*/ IMAD.IADD R2, R0, 0x1, R11 ; /* 0x0000000100027824 */ /* 0x000fca00078e020b */ /*0320*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */ /* 0x000fc80003f06270 */ /*0330*/ ISETP.GE.U32.OR P0, PT, R11, R0, P0 ; /* 0x000000000b00720c */ /* 0x000fda0000706470 */ /*0340*/ @!P0 LDS R2, [R11.X4] ; /* 0x000000000b028984 */ /* 0x000e240000004800 */ /*0350*/ @!P0 ISETP.NE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200820c */ /* 0x001fe40003f45270 */ /*0360*/ IADD3 R2, R0.reuse, 0x1, RZ ; /* 0x0000000100027810 */ /* 0x040fe40007ffe0ff */ /*0370*/ @!P0 SEL R3, R0, RZ, !P2 ; /* 0x000000ff00038207 */ /* 0x000fca0005000000 */ /*0380*/ @!P0 IMAD R3, R3, 0x4, R6 ; /* 0x0000000403038824 */ /* 0x000fcc00078e0206 */ /*0390*/ @!P0 LDS R3, [R3] ; /* 0x0000000003038984 */ /* 0x000e280000000800 */ /*03a0*/ @!P0 STS [R11.X4], R3 ; /* 0x000000030b008388 */ /* 0x0011e80000004800 */ /*03b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*03c0*/ ISETP.GT.U32.AND P0, PT, R2, 0x2, PT ; /* 0x000000020200780c */ /* 0x000fda0003f04070 */ /*03d0*/ @P0 BRA 0x2f0 ; /* 0xffffff1000000947 */ /* 0x001fea000383ffff */ /*03e0*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*03f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e220000000800 */ /*0400*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0410*/ IMAD.WIDE.U32 R2, R8, R3, c[0x0][0x178] ; /* 0x00005e0008027625 */ /* 0x000fca00078e0003 */ /*0420*/ STG.E [R2.64], R5 ; 
/* 0x0000000502007986 */ /* 0x001fe2000c101908 */ /*0430*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0440*/ BRA 0x440; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0480*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0490*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z16find_word_kernelPcS_iPi .globl _Z16find_word_kernelPcS_iPi .p2align 8 .type _Z16find_word_kernelPcS_iPi,@function _Z16find_word_kernelPcS_iPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x10 s_mov_b32 s4, s15 s_mov_b32 s10, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s5, s2, 0xffff s_add_i32 s3, s3, -1 v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_2) v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_6 s_load_b64 s[2:3], s[0:1], 0x0 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_load_u8 v3, v2, s[2:3] s_waitcnt vmcnt(0) v_cmp_eq_u16_e32 vcc_lo, 0, v3 s_cbranch_vccnz .LBB0_5 s_load_b64 s[6:7], s[0:1], 0x8 v_mov_b32_e32 v4, v1 s_add_u32 s8, s2, 1 s_addc_u32 s9, s3, 0 s_mov_b32 s3, -1 .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v4 s_waitcnt lgkmcnt(0) v_add_co_u32 v5, vcc_lo, s6, v4 v_and_b32_e32 v7, 0xff, v3 v_add_nc_u32_e32 v4, 1, v4 v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo global_load_u8 v5, v[5:6], off global_load_u8 v3, v2, s[8:9] s_waitcnt vmcnt(1) v_cmp_eq_u16_e32 vcc_lo, v5, v7 s_waitcnt vmcnt(0) v_cmp_eq_u16_e64 s2, 0, v3 s_and_b32 s3, s3, vcc_lo s_add_u32 s8, s8, 1 s_addc_u32 s9, s9, 0 s_and_not1_b32 s11, s11, exec_lo s_and_b32 s12, s3, exec_lo s_and_b32 vcc_lo, exec_lo, s2 s_or_b32 s11, s11, s12 s_cbranch_vccz .LBB0_3 v_cndmask_b32_e64 v1, 0, v1, s11 .LBB0_5: s_delay_alu instid0(VALU_DEP_1) v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, v0 .LBB0_6: s_or_b32 exec_lo, exec_lo, s10 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v1, 2, v1 s_cmp_eq_u32 s5, 0 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_14 v_lshlrev_b32_e32 v1, 2, v0 s_add_i32 s2, s5, 1 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_10 .p2align 6 .LBB0_8: s_or_b32 exec_lo, exec_lo, s7 s_waitcnt lgkmcnt(0) ds_store_b32 v1, v3 .LBB0_9: s_or_b32 exec_lo, exec_lo, s6 s_cmp_gt_u32 s3, 3 s_waitcnt 
lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_14 .LBB0_10: s_mov_b32 s3, s2 s_lshr_b32 s2, s2, 1 s_mov_b32 s6, exec_lo v_cmpx_gt_u32_e64 s2, v0 s_cbranch_execz .LBB0_9 v_add_nc_u32_e32 v2, s2, v0 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u32_e32 vcc_lo, s5, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_9 ds_load_b32 v3, v1 s_mov_b32 s7, exec_lo s_waitcnt lgkmcnt(0) v_cmpx_eq_u32_e32 0, v3 s_cbranch_execz .LBB0_8 v_lshlrev_b32_e32 v2, 2, v2 ds_load_b32 v3, v2 s_branch .LBB0_8 .LBB0_14: s_set_inst_prefetch_distance 0x2 s_mov_b32 s2, exec_lo v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_16 v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x18 s_mov_b32 s5, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[4:5], 2 ds_load_b32 v0, v0 s_waitcnt lgkmcnt(0) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 global_store_b32 v1, v0, s[0:1] .LBB0_16: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16find_word_kernelPcS_iPi .amdhsa_group_segment_fixed_size 512 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 
.amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z16find_word_kernelPcS_iPi, .Lfunc_end0-_Z16find_word_kernelPcS_iPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 512 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 
.name: _Z16find_word_kernelPcS_iPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16find_word_kernelPcS_iPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000f4f9d_00000000-6_find_word_shared.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi .type _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi, @function _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi: .LFB2083: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16find_word_kernelPcS_iPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi, .-_Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi .globl _Z16find_word_kernelPcS_iPi .type _Z16find_word_kernelPcS_iPi, @function _Z16find_word_kernelPcS_iPi: .LFB2084: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z16find_word_kernelPcS_iPi, .-_Z16find_word_kernelPcS_iPi .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Launching %d threads in %d blocks\n" .text .globl _Z16find_word_in_gpuPcS_ .type _Z16find_word_in_gpuPcS_, @function _Z16find_word_in_gpuPcS_: .LFB2058: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $64, %rsp .cfi_def_cfa_offset 112 movq %rdi, %r14 movq %rsi, %r13 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax call strlen@PLT movq %rax, %rbx movq %r13, %rdi call strlen@PLT movq %rax, %rbp movl %eax, %r12d movslq %ebx, %rsi leaq 8(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT movq %r14, %rsi movq 8(%rsp), %rdi call strcpy@PLT movslq %ebp, %rsi leaq 16(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT movq %r13, %rsi movq 16(%rsp), %rdi call strcpy@PLT subl %ebx, %ebp leal 255(%rbp), %ebx movl %ebp, %eax subl $-128, %eax cmovns %eax, %ebx sarl $7, %ebx movl %ebx, %ecx movl $128, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebx, %rsi salq $2, %rsi leaq 24(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT testl %ebp, %ebp js .L12 movl $0, %eax .L13: movq 24(%rsp), %rdx movl $0, (%rdx,%rax,4) addq $1, %rax cmpl %eax, %ebx jg .L13 movl $128, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl %ebx, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 call cudaDeviceSynchronize@PLT .L19: movq 24(%rsp), %rdx movl $0, %eax .L16: movl 
(%rdx,%rax,4), %ebp testl %ebp, %ebp jne .L15 addq $1, %rax cmpl %eax, %ebx jg .L16 movl $-1, %ebp .L15: movq 24(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L26 movl %ebp, %eax addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L17: .cfi_restore_state call cudaDeviceSynchronize@PLT movl $-1, %ebp jmp .L15 .L12: movl $128, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl %ebx, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L17 .L18: movq 24(%rsp), %rcx movl %r12d, %edx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z41__device_stub__Z16find_word_kernelPcS_iPiPcS_iPi call cudaDeviceSynchronize@PLT testl %ebp, %ebp jns .L19 movl $-1, %ebp jmp .L15 .L26: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size _Z16find_word_in_gpuPcS_, .-_Z16find_word_in_gpuPcS_ .section .rodata.str1.8 .align 8 .LC1: .string "Sorry, the word was not found in the reference string\n" .align 8 .LC2: .string "Word: %s\nReference string: %s\n\n" .align 8 .LC3: .string "The word was found at position: %d\n" .align 8 .LC4: .string "Word: %s\nReference string: %s\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string " " .LC6: .string " " .LC7: .string "^\n\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq 8(%rsi), %r12 movq 16(%rsi), %rbx movq %r12, %rsi movq %rbx, %rdi call _Z16find_word_in_gpuPcS_ cmpl $-1, %eax je .L34 movl %eax, %ebp movl %eax, %edx leaq .LC3(%rip), 
%rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r12, %rcx movq %rbx, %rdx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $1, %ebp jle .L30 subl $1, %ebp movl $0, %ebx leaq .LC6(%rip), %r12 .L31: movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebp, %ebx jne .L31 .L30: leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L29: movl $0, %eax popq %rbx .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r12, %rcx movq %rbx, %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L29 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z16find_word_kernelPcS_iPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z16find_word_kernelPcS_iPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size 
_ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "find_word_shared.hip" .globl _Z31__device_stub__find_word_kernelPcS_iPi # -- Begin function _Z31__device_stub__find_word_kernelPcS_iPi .p2align 4, 0x90 .type _Z31__device_stub__find_word_kernelPcS_iPi,@function _Z31__device_stub__find_word_kernelPcS_iPi: # @_Z31__device_stub__find_word_kernelPcS_iPi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16find_word_kernelPcS_iPi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z31__device_stub__find_word_kernelPcS_iPi, .Lfunc_end0-_Z31__device_stub__find_word_kernelPcS_iPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq 8(%rsi), %r14 movq 16(%rsi), %r15 movq %r15, %rdi movq %r14, %rsi callq _Z16find_word_in_gpuPcS_ cmpl $-1, %eax je .LBB1_1 # %bb.2: movl %eax, %ebx movl $.L.str.2, %edi movl %eax, %esi xorl %eax, %eax callq printf movl $.L.str.3, %edi movq %r15, %rsi movq %r14, %rdx xorl %eax, %eax callq printf movl $.L.str.4, %edi xorl %eax, %eax callq printf cmpl $2, %ebx jl .LBB1_5 # %bb.3: # %.lr.ph.preheader decl %ebx .p2align 4, 0x90 .LBB1_4: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl $32, %edi callq putchar@PLT decl %ebx jne .LBB1_4 .LBB1_5: # 
%._crit_edge movl $.Lstr, %edi callq puts@PLT jmp .LBB1_6 .LBB1_1: movl $.Lstr.1, %edi callq puts@PLT movl $.L.str.1, %edi movq %r15, %rsi movq %r14, %rdx xorl %eax, %eax callq printf .LBB1_6: xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z16find_word_in_gpuPcS_ # -- Begin function _Z16find_word_in_gpuPcS_ .p2align 4, 0x90 .type _Z16find_word_in_gpuPcS_,@function _Z16find_word_in_gpuPcS_: # @_Z16find_word_in_gpuPcS_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %r12 movq %rdi, %rbx callq strlen movq %rax, %r15 movq %r12, %rdi callq strlen movq %rax, %r14 movslq %r15d, %r13 leaq 24(%rsp), %rdi movl $1, %r15d movq %r13, %rsi movl $1, %edx callq hipMallocManaged movq 24(%rsp), %rdi movq %rbx, %rsi callq strcpy movslq %r14d, %rbx leaq 16(%rsp), %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged movq 16(%rsp), %rdi movq %r12, %rsi callq strcpy subl %r13d, %ebx leal 128(%rbx), %eax leal 255(%rbx), %ebp testl %eax, %eax cmovnsl %eax, %ebp sarl $7, %ebp movl $.L.str.7, %edi movl $128, %esi movl %ebp, %edx xorl %eax, %eax callq printf movslq %ebp, %rsi shlq $2, %rsi leaq 8(%rsp), %rdi movl $1, %edx callq hipMallocManaged cmpl $2147483519, %ebx # imm = 0x7FFFFF7F ja .LBB2_2 # %bb.1: # %.lr.ph movq 8(%rsp), %rdi cmpl $2, %ebp cmovgel %ebp, %r15d shlq $2, %r15 xorl %esi, %esi movq %r15, %rdx callq memset@PLT .LBB2_2: # %._crit_edge movl %ebp, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $128, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, 
%r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_4 # %bb.3: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movl %r14d, 36(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 88(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z16find_word_kernelPcS_iPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_4: callq hipDeviceSynchronize cmpl $2147483519, %ebx # imm = 0x7FFFFF7F ja .LBB2_9 # %bb.5: # %.lr.ph36 movq 8(%rsp), %rax cmpl $2, %ebp movl $1, %ecx cmovgel %ebp, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB2_6: # =>This Inner Loop Header: Depth=1 movl (%rax,%rdx,4), %ebx testl %ebx, %ebx jne .LBB2_10 # %bb.7: # in Loop: Header=BB2_6 Depth=1 incq %rdx cmpq %rdx, %rcx jne .LBB2_6 .LBB2_9: movl $-1, %ebx .LBB2_10: # %._crit_edge37 movq 8(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movl %ebx, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z16find_word_in_gpuPcS_, .Lfunc_end2-_Z16find_word_in_gpuPcS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16find_word_kernelPcS_iPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z16find_word_kernelPcS_iPi,@object # @_Z16find_word_kernelPcS_iPi .section .rodata,"a",@progbits .globl _Z16find_word_kernelPcS_iPi .p2align 3, 0x0 _Z16find_word_kernelPcS_iPi: .quad _Z31__device_stub__find_word_kernelPcS_iPi .size _Z16find_word_kernelPcS_iPi, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "Word: %s\nReference string: %s\n\n" .size .L.str.1, 32 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "The word was found at position: %d\n" .size .L.str.2, 36 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Word: %s\nReference string: %s\n" .size .L.str.3, 31 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " " .size .L.str.4, 20 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Launching %d threads in %d blocks\n" .size .L.str.7, 35 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z16find_word_kernelPcS_iPi" .size .L__unnamed_1, 28 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section 
.hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "^\n" .size .Lstr, 3 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Sorry, the word was not found in the reference string" .size .Lstr.1, 54 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z31__device_stub__find_word_kernelPcS_iPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z16find_word_kernelPcS_iPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory cudaHostAlloc(&h_a, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_b, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_c, sizeByte, cudaHostAllocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } cudaMalloc(&d_a0,sizeByte/2); cudaMalloc(&d_b0,sizeByte/2); cudaMalloc(&d_c0,sizeByte/2); cudaMalloc(&d_a1,sizeByte/2); cudaMalloc(&d_b1,sizeByte/2); cudaMalloc(&d_c1,sizeByte/2); cudaMemcpyAsync(d_a0, h_a, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_a1, h_a + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(d_b0, h_b, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_b1, h_b + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); cudaMemcpyAsync(h_c, d_c0, sizeByte/2, cudaMemcpyDeviceToHost, stream0); cudaMemcpyAsync(h_c+N, d_c1, sizeByte/2, cudaMemcpyDeviceToHost, stream1); //only synchronize on cpu/host cudaDeviceSynchronize(); cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream1); cudaEventRecord(stop); cudaEventSynchronize(stop); float time = 0; cudaEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); 
cudaEventDestroy(stop); cudaEventDestroy(start); int Correct = 1; int wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); // 清空cuda分配的cpu内存 cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); return 0; }
code for sm_80 Function : _Z6gpuAddPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GT.AND P0, PT, R0, 0xf423f, PT ; /* 0x000f423f0000780c */ /* 0x000fda0003f04270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x001fd400000001ff */ /*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fe200078e0207 */ /*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */ /* 0x000fca0000000f00 */ /*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */ /* 0x000fca00078e0200 */ /*00f0*/ ISETP.GE.AND P0, PT, R0, 0xf4240, PT ; /* 0x000f42400000780c */ /* 0x000fe40003f06270 */ /*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001ec000c101904 */ /*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */ /* 0x000fea000383ffff */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory cudaHostAlloc(&h_a, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_b, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_c, sizeByte, cudaHostAllocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } cudaMalloc(&d_a0,sizeByte/2); cudaMalloc(&d_b0,sizeByte/2); cudaMalloc(&d_c0,sizeByte/2); cudaMalloc(&d_a1,sizeByte/2); cudaMalloc(&d_b1,sizeByte/2); cudaMalloc(&d_c1,sizeByte/2); cudaMemcpyAsync(d_a0, h_a, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_a1, h_a + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(d_b0, h_b, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_b1, h_b + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); cudaMemcpyAsync(h_c, d_c0, sizeByte/2, cudaMemcpyDeviceToHost, stream0); cudaMemcpyAsync(h_c+N, d_c1, sizeByte/2, cudaMemcpyDeviceToHost, stream1); //only synchronize on cpu/host cudaDeviceSynchronize(); cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream1); cudaEventRecord(stop); cudaEventSynchronize(stop); float time = 0; cudaEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); 
cudaEventDestroy(stop); cudaEventDestroy(start); int Correct = 1; int wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); // 清空cuda分配的cpu内存 cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); return 0; }
.file "tmpxft_00059b2a_00000000-6_async.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ .type _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, @function _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6gpuAddPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ .globl _Z6gpuAddPiS_S_ .type _Z6gpuAddPiS_S_, @function _Z6gpuAddPiS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z6gpuAddPiS_S_, .-_Z6gpuAddPiS_S_ 
.section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Time consumption: %lf\n" .LC2: .string "Vector addition on GPU \n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "GPU has computed Sum Correctly\n" .align 8 .LC4: .string "There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 80(%rsp), %rdi call cudaStreamCreate@PLT leaq 88(%rsp), %rdi call cudaStreamCreate@PLT leaq 96(%rsp), %rdi call cudaEventCreate@PLT leaq 104(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 96(%rsp), %rdi call cudaEventRecord@PLT leaq 8(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT leaq 16(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT leaq 24(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT movl $0, %eax .L12: movq 8(%rsp), %rdx movl %eax, (%rdx,%rax,4) movq 16(%rsp), %rdx movl %eax, (%rdx,%rax,4) addq $1, %rax cmpq $2000000, %rax jne .L12 leaq 32(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 48(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 56(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 64(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 72(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT movq 80(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 8(%rsp), %rsi movq 32(%rsp), %rdi call cudaMemcpyAsync@PLT movq 8(%rsp), %rax leaq 4000000(%rax), %rsi movq 88(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 56(%rsp), %rdi call cudaMemcpyAsync@PLT movq 80(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 16(%rsp), %rsi movq 40(%rsp), %rdi call cudaMemcpyAsync@PLT movq 16(%rsp), %rax leaq 4000000(%rax), %rsi movq 88(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 64(%rsp), %rdi call cudaMemcpyAsync@PLT 
movl $512, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl $512, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movq 80(%rsp), %r9 movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L13: movl $512, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl $512, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movq 88(%rsp), %r9 movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L14: movq 80(%rsp), %r8 movl $2, %ecx movl $4000000, %edx movq 48(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpyAsync@PLT movq 24(%rsp), %rax leaq 4000000(%rax), %rdi movq 88(%rsp), %r8 movl $2, %ecx movl $4000000, %edx movq 72(%rsp), %rsi call cudaMemcpyAsync@PLT call cudaDeviceSynchronize@PLT movq 80(%rsp), %rdi call cudaStreamSynchronize@PLT movq 88(%rsp), %rdi call cudaStreamSynchronize@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT movq 104(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 124(%rsp) leaq 124(%rsp), %rdi movq 104(%rsp), %rdx movq 96(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 124(%rsp), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 104(%rsp), %rdi call cudaEventDestroy@PLT movq 96(%rsp), %rdi call cudaEventDestroy@PLT leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi movq 16(%rsp), %rcx movq 24(%rsp), %rsi movl $0, %eax .L16: movl (%rcx,%rax,4), %edx addl (%rdi,%rax,4), %edx cmpl (%rsi,%rax,4), %edx jne .L15 addq $1, %rax cmpq $2000000, %rax jne .L16 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L18 .L25: movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ jmp .L13 .L26: movq 72(%rsp), %rdx movq 64(%rsp), %rsi movq 56(%rsp), %rdi call 
_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ jmp .L14 .L15: movl %eax, %edx cltq movl (%rcx,%rax,4), %ecx addl (%rdi,%rax,4), %ecx movl (%rsi,%rax,4), %r8d leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L18: movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFreeHost@PLT movq 16(%rsp), %rdi call cudaFreeHost@PLT movq 24(%rsp), %rdi call cudaFreeHost@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L27 movl $0, %eax addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z6gpuAddPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z6gpuAddPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local 
_ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory cudaHostAlloc(&h_a, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_b, sizeByte, cudaHostAllocDefault); cudaHostAlloc(&h_c, sizeByte, cudaHostAllocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } cudaMalloc(&d_a0,sizeByte/2); cudaMalloc(&d_b0,sizeByte/2); cudaMalloc(&d_c0,sizeByte/2); cudaMalloc(&d_a1,sizeByte/2); cudaMalloc(&d_b1,sizeByte/2); cudaMalloc(&d_c1,sizeByte/2); cudaMemcpyAsync(d_a0, h_a, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_a1, h_a + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(d_b0, h_b, sizeByte/2, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(d_b1, h_b + N, sizeByte/2, cudaMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); cudaMemcpyAsync(h_c, d_c0, sizeByte/2, cudaMemcpyDeviceToHost, stream0); cudaMemcpyAsync(h_c+N, d_c1, sizeByte/2, cudaMemcpyDeviceToHost, stream1); //only synchronize on cpu/host cudaDeviceSynchronize(); cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream1); cudaEventRecord(stop); cudaEventSynchronize(stop); float time = 0; cudaEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); 
cudaEventDestroy(stop); cudaEventDestroy(start); int Correct = 1; int wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0); // 清空cuda分配的cpu内存 cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory hipHostAlloc(&h_a, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_b, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_c, sizeByte, hipHostMallocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } hipMalloc(&d_a0,sizeByte/2); hipMalloc(&d_b0,sizeByte/2); hipMalloc(&d_c0,sizeByte/2); hipMalloc(&d_a1,sizeByte/2); hipMalloc(&d_b1,sizeByte/2); hipMalloc(&d_c1,sizeByte/2); hipMemcpyAsync(d_a0, h_a, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_a1, h_a + N, sizeByte/2, hipMemcpyHostToDevice, stream1); hipMemcpyAsync(d_b0, h_b, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_b1, h_b + N, sizeByte/2, hipMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); hipMemcpyAsync(h_c, d_c0, sizeByte/2, hipMemcpyDeviceToHost, stream0); hipMemcpyAsync(h_c+N, d_c1, sizeByte/2, hipMemcpyDeviceToHost, stream1); //only synchronize on cpu/host hipDeviceSynchronize(); hipStreamSynchronize(stream0); hipStreamSynchronize(stream1); hipEventRecord(stop); hipEventSynchronize(stop); float time = 0; hipEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); hipEventDestroy(stop); hipEventDestroy(start); int Correct = 1; int 
wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); // 清空cuda分配的cpu内存 hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory hipHostAlloc(&h_a, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_b, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_c, sizeByte, hipHostMallocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } hipMalloc(&d_a0,sizeByte/2); hipMalloc(&d_b0,sizeByte/2); hipMalloc(&d_c0,sizeByte/2); hipMalloc(&d_a1,sizeByte/2); hipMalloc(&d_b1,sizeByte/2); hipMalloc(&d_c1,sizeByte/2); hipMemcpyAsync(d_a0, h_a, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_a1, h_a + N, sizeByte/2, hipMemcpyHostToDevice, stream1); hipMemcpyAsync(d_b0, h_b, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_b1, h_b + N, sizeByte/2, hipMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); hipMemcpyAsync(h_c, d_c0, sizeByte/2, hipMemcpyDeviceToHost, stream0); hipMemcpyAsync(h_c+N, d_c1, sizeByte/2, hipMemcpyDeviceToHost, stream1); //only synchronize on cpu/host hipDeviceSynchronize(); hipStreamSynchronize(stream0); hipStreamSynchronize(stream1); hipEventRecord(stop); hipEventSynchronize(stop); float time = 0; hipEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); hipEventDestroy(stop); hipEventDestroy(start); int Correct = 1; int 
wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); // 清空cuda分配的cpu内存 hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6gpuAddPiS_S_ .globl _Z6gpuAddPiS_S_ .p2align 8 .type _Z6gpuAddPiS_S_,@function _Z6gpuAddPiS_S_: s_load_b32 s4, s[0:1], 0x24 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0xf4240, v1 s_cbranch_execz .LBB0_3 s_load_b32 s9, s[2:3], 0x0 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[2:3], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_mul_i32 s1, s9, s8 s_mov_b32 s8, 0 .p2align 6 .LBB0_2: v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_nc_u32_e32 v1, s1, v1 v_add_co_u32 v4, vcc_lo, s4, v2 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo v_add_co_u32 v6, vcc_lo, s6, v2 v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo v_cmp_lt_i32_e32 vcc_lo, 0xf423f, v1 global_load_b32 v0, v[4:5], off global_load_b32 v4, v[6:7], off v_add_co_u32 v2, s0, s2, v2 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, s3, v3, s0 s_or_b32 s8, vcc_lo, s8 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v0, v4, v0 global_store_b32 v[2:3], v0, off s_and_not1_b32 exec_lo, exec_lo, s8 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6gpuAddPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6gpuAddPiS_S_, .Lfunc_end0-_Z6gpuAddPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6gpuAddPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6gpuAddPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #define N 1000000 __global__ void gpuAdd(int *d_a, int *d_b, int *d_c){ //总线程id = 当前块线程id.x + 块id*块维度x int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { d_c[tid] = d_a[tid] + d_b[tid];// 加法 tid += blockDim.x * gridDim.x; // 一次执行一个格子 块维度x*格子维度x } } int main(){ int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; int sizeByte = N*sizeof(int)*2; //create two streams hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //Use cudaHostMalloc to allocate page-locked memory hipHostAlloc(&h_a, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_b, sizeByte, hipHostMallocDefault); hipHostAlloc(&h_c, sizeByte, hipHostMallocDefault); //same as single stream, just add stream parameter when launching kernel for(int i=0;i<N*2;i++){ h_a[i] = i; h_b[i] = i; } hipMalloc(&d_a0,sizeByte/2); hipMalloc(&d_b0,sizeByte/2); hipMalloc(&d_c0,sizeByte/2); hipMalloc(&d_a1,sizeByte/2); hipMalloc(&d_b1,sizeByte/2); hipMalloc(&d_c1,sizeByte/2); hipMemcpyAsync(d_a0, h_a, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_a1, h_a + N, sizeByte/2, hipMemcpyHostToDevice, stream1); hipMemcpyAsync(d_b0, h_b, sizeByte/2, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(d_b1, h_b + N, sizeByte/2, hipMemcpyHostToDevice, stream1); gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0); gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1); hipMemcpyAsync(h_c, d_c0, sizeByte/2, hipMemcpyDeviceToHost, stream0); hipMemcpyAsync(h_c+N, d_c1, sizeByte/2, hipMemcpyDeviceToHost, stream1); //only synchronize on cpu/host hipDeviceSynchronize(); hipStreamSynchronize(stream0); hipStreamSynchronize(stream1); hipEventRecord(stop); hipEventSynchronize(stop); float time = 0; hipEventElapsedTime(&time, start, stop); printf("Time consumption: %lf\n", time); hipEventDestroy(stop); hipEventDestroy(start); int Correct = 1; int 
wrongIndex = -1; printf("Vector addition on GPU \n"); //Printing result on console for (int i = 0; i < 2*N; i++) { if ((h_a[i] + h_b[i] != h_c[i])) { Correct = 0; wrongIndex = i; break; } } if (Correct == 1) { printf("GPU has computed Sum Correctly\n"); } else { printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]); } // 清空GPU内存 hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); hipFree(d_a0); hipFree(d_b0); hipFree(d_c0); // 清空cuda分配的cpu内存 hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); return 0; }
.text .file "async.hip" .globl _Z21__device_stub__gpuAddPiS_S_ # -- Begin function _Z21__device_stub__gpuAddPiS_S_ .p2align 4, 0x90 .type _Z21__device_stub__gpuAddPiS_S_,@function _Z21__device_stub__gpuAddPiS_S_: # @_Z21__device_stub__gpuAddPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__gpuAddPiS_S_, .Lfunc_end0-_Z21__device_stub__gpuAddPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $216, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 leaq 32(%rsp), %rdi callq hipStreamCreate leaq 24(%rsp), %rdi callq hipStreamCreate leaq 80(%rsp), %rdi callq hipEventCreate leaq 48(%rsp), %rdi callq hipEventCreate movq 80(%rsp), %rdi xorl %ebx, %ebx xorl %esi, %esi callq hipEventRecord leaq 16(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc leaq 8(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc leaq 40(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc movq 16(%rsp), %rax movq 8(%rsp), %rcx .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %ebx, (%rax,%rbx,4) movl %ebx, (%rcx,%rbx,4) incq %rbx cmpq $2000000, %rbx # imm = 0x1E8480 jne .LBB1_1 # 
%bb.2: movabsq $4294967808, %rbx # imm = 0x100000200 leaq 72(%rsp), %rdi movl $4000000, %r14d # imm = 0x3D0900 movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 64(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 56(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 208(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 200(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 192(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc movq 72(%rsp), %rdi movq 16(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 208(%rsp), %rdi movq 16(%rsp), %rsi addq %r14, %rsi movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 64(%rsp), %rdi movq 8(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 200(%rsp), %rdi addq 8(%rsp), %r14 movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movq %r14, %rsi movl $1, %ecx callq hipMemcpyAsync movq 32(%rsp), %r9 movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 72(%rsp), %rax movq 64(%rsp), %rcx movq 56(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 160(%rsp) leaq 144(%rsp), %rax movq %rax, 168(%rsp) leaq 136(%rsp), %rax movq %rax, 176(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq 24(%rsp), %r9 xorl %r14d, %r14d movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx 
xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq 208(%rsp), %rax movq 200(%rsp), %rcx movq 192(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 160(%rsp) leaq 144(%rsp), %rax movq %rax, 168(%rsp) leaq 136(%rsp), %rax movq %rax, 176(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq 40(%rsp), %rdi movq 56(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %ebx # imm = 0x3D0900 movl $4000000, %edx # imm = 0x3D0900 movl $2, %ecx callq hipMemcpyAsync addq 40(%rsp), %rbx movq 192(%rsp), %rsi movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movq %rbx, %rdi movl $2, %ecx callq hipMemcpyAsync callq hipDeviceSynchronize movq 32(%rsp), %rdi callq hipStreamSynchronize movq 24(%rsp), %rdi callq hipStreamSynchronize movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 48(%rsp), %rdi callq hipEventSynchronize movl $0, 160(%rsp) movq 80(%rsp), %rsi movq 48(%rsp), %rdx leaq 160(%rsp), %rdi callq hipEventElapsedTime movss 160(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 48(%rsp), %rdi callq hipEventDestroy movq 80(%rsp), %rdi callq hipEventDestroy movl $.Lstr, %edi callq puts@PLT movq 16(%rsp), %rax movq 8(%rsp), %rcx movl (%rcx), %edi addl (%rax), %edi movq 40(%rsp), %rdx movl $0, %esi cmpl (%rdx), %edi jne .LBB1_11 # %bb.7: # %.lr.ph.preheader xorl %esi, %esi .p2align 4, 0x90 .LBB1_9: # %.lr.ph # =>This Inner Loop Header: Depth=1 cmpq $1999999, %rsi # imm = 0x1E847F je .LBB1_10 # %bb.8: # in Loop: Header=BB1_9 Depth=1 movl 4(%rcx,%rsi,4), %edi 
addl 4(%rax,%rsi,4), %edi leaq 1(%rsi), %r14 cmpl 4(%rdx,%rsi,4), %edi movq %r14, %rsi je .LBB1_9 # %bb.15: # %._crit_edge leaq -1(%r14), %rax cmpq $1999999, %rax # imm = 0x1E847F setae %sil .LBB1_11: # %._crit_edge69 testb %sil, %sil je .LBB1_13 .LBB1_12: movl $.Lstr.1, %edi callq puts@PLT jmp .LBB1_14 .LBB1_10: # %._crit_edge69.loopexit setae %sil movl $-1, %r14d testb %sil, %sil jne .LBB1_12 .LBB1_13: movq 16(%rsp), %rax movslq %r14d, %rsi movq 8(%rsp), %rcx movl (%rcx,%rsi,4), %edx addl (%rax,%rsi,4), %edx movq 40(%rsp), %rax movl (%rax,%rsi,4), %ecx movl $.L.str.3, %edi # kill: def $esi killed $esi killed $rsi xorl %eax, %eax callq printf .LBB1_14: movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipHostFree movq 8(%rsp), %rdi callq hipHostFree movq 40(%rsp), %rdi callq hipHostFree xorl %eax, %eax addq $216, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6gpuAddPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6gpuAddPiS_S_,@object # @_Z6gpuAddPiS_S_ .section .rodata,"a",@progbits .globl _Z6gpuAddPiS_S_ .p2align 3, 0x0 _Z6gpuAddPiS_S_: .quad _Z21__device_stub__gpuAddPiS_S_ .size _Z6gpuAddPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time consumption: %lf\n" .size .L.str, 23 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n" .size .L.str.3, 69 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6gpuAddPiS_S_" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Vector addition on GPU " .size .Lstr, 24 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "GPU has computed Sum Correctly" .size .Lstr.1, 31 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__gpuAddPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6gpuAddPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6gpuAddPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GT.AND P0, PT, R0, 0xf423f, PT ; /* 0x000f423f0000780c */ /* 0x000fda0003f04270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x001fd400000001ff */ /*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fe200078e0207 */ /*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */ /* 0x000fca0000000f00 */ /*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */ /* 0x000fca00078e0200 */ /*00f0*/ ISETP.GE.AND P0, PT, R0, 0xf4240, PT ; /* 0x000f42400000780c */ /* 0x000fe40003f06270 */ /*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001ec000c101904 */ /*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */ /* 0x000fea000383ffff */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6gpuAddPiS_S_ .globl _Z6gpuAddPiS_S_ .p2align 8 .type _Z6gpuAddPiS_S_,@function _Z6gpuAddPiS_S_: s_load_b32 s4, s[0:1], 0x24 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s4, 0xffff s_mov_b32 s4, exec_lo v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e32 0xf4240, v1 s_cbranch_execz .LBB0_3 s_load_b32 s9, s[2:3], 0x0 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[2:3], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_mul_i32 s1, s9, s8 s_mov_b32 s8, 0 .p2align 6 .LBB0_2: v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_nc_u32_e32 v1, s1, v1 v_add_co_u32 v4, vcc_lo, s4, v2 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo v_add_co_u32 v6, vcc_lo, s6, v2 v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo v_cmp_lt_i32_e32 vcc_lo, 0xf423f, v1 global_load_b32 v0, v[4:5], off global_load_b32 v4, v[6:7], off v_add_co_u32 v2, s0, s2, v2 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, s3, v3, s0 s_or_b32 s8, vcc_lo, s8 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v0, v4, v0 global_store_b32 v[2:3], v0, off s_and_not1_b32 exec_lo, exec_lo, s8 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6gpuAddPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6gpuAddPiS_S_, .Lfunc_end0-_Z6gpuAddPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6gpuAddPiS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6gpuAddPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00059b2a_00000000-6_async.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ .type _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, @function _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6gpuAddPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ .globl _Z6gpuAddPiS_S_ .type _Z6gpuAddPiS_S_, @function _Z6gpuAddPiS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z6gpuAddPiS_S_, .-_Z6gpuAddPiS_S_ 
.section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Time consumption: %lf\n" .LC2: .string "Vector addition on GPU \n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "GPU has computed Sum Correctly\n" .align 8 .LC4: .string "There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 80(%rsp), %rdi call cudaStreamCreate@PLT leaq 88(%rsp), %rdi call cudaStreamCreate@PLT leaq 96(%rsp), %rdi call cudaEventCreate@PLT leaq 104(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 96(%rsp), %rdi call cudaEventRecord@PLT leaq 8(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT leaq 16(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT leaq 24(%rsp), %rdi movl $0, %edx movl $8000000, %esi call cudaHostAlloc@PLT movl $0, %eax .L12: movq 8(%rsp), %rdx movl %eax, (%rdx,%rax,4) movq 16(%rsp), %rdx movl %eax, (%rdx,%rax,4) addq $1, %rax cmpq $2000000, %rax jne .L12 leaq 32(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 48(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 56(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 64(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT leaq 72(%rsp), %rdi movl $4000000, %esi call cudaMalloc@PLT movq 80(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 8(%rsp), %rsi movq 32(%rsp), %rdi call cudaMemcpyAsync@PLT movq 8(%rsp), %rax leaq 4000000(%rax), %rsi movq 88(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 56(%rsp), %rdi call cudaMemcpyAsync@PLT movq 80(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 16(%rsp), %rsi movq 40(%rsp), %rdi call cudaMemcpyAsync@PLT movq 16(%rsp), %rax leaq 4000000(%rax), %rsi movq 88(%rsp), %r8 movl $1, %ecx movl $4000000, %edx movq 64(%rsp), %rdi call cudaMemcpyAsync@PLT 
movl $512, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl $512, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movq 80(%rsp), %r9 movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L13: movl $512, 124(%rsp) movl $1, 128(%rsp) movl $1, 132(%rsp) movl $512, 112(%rsp) movl $1, 116(%rsp) movl $1, 120(%rsp) movq 88(%rsp), %r9 movl $0, %r8d movq 124(%rsp), %rdx movl $1, %ecx movq 112(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L14: movq 80(%rsp), %r8 movl $2, %ecx movl $4000000, %edx movq 48(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpyAsync@PLT movq 24(%rsp), %rax leaq 4000000(%rax), %rdi movq 88(%rsp), %r8 movl $2, %ecx movl $4000000, %edx movq 72(%rsp), %rsi call cudaMemcpyAsync@PLT call cudaDeviceSynchronize@PLT movq 80(%rsp), %rdi call cudaStreamSynchronize@PLT movq 88(%rsp), %rdi call cudaStreamSynchronize@PLT movl $0, %esi movq 104(%rsp), %rdi call cudaEventRecord@PLT movq 104(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 124(%rsp) leaq 124(%rsp), %rdi movq 104(%rsp), %rdx movq 96(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 124(%rsp), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 104(%rsp), %rdi call cudaEventDestroy@PLT movq 96(%rsp), %rdi call cudaEventDestroy@PLT leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi movq 16(%rsp), %rcx movq 24(%rsp), %rsi movl $0, %eax .L16: movl (%rcx,%rax,4), %edx addl (%rdi,%rax,4), %edx cmpl (%rsi,%rax,4), %edx jne .L15 addq $1, %rax cmpq $2000000, %rax jne .L16 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L18 .L25: movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ jmp .L13 .L26: movq 72(%rsp), %rdx movq 64(%rsp), %rsi movq 56(%rsp), %rdi call 
_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_ jmp .L14 .L15: movl %eax, %edx cltq movl (%rcx,%rax,4), %ecx addl (%rdi,%rax,4), %ecx movl (%rsi,%rax,4), %r8d leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L18: movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFreeHost@PLT movq 16(%rsp), %rdi call cudaFreeHost@PLT movq 24(%rsp), %rdi call cudaFreeHost@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L27 movl $0, %eax addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z6gpuAddPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z6gpuAddPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local 
_ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "async.hip" .globl _Z21__device_stub__gpuAddPiS_S_ # -- Begin function _Z21__device_stub__gpuAddPiS_S_ .p2align 4, 0x90 .type _Z21__device_stub__gpuAddPiS_S_,@function _Z21__device_stub__gpuAddPiS_S_: # @_Z21__device_stub__gpuAddPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__gpuAddPiS_S_, .Lfunc_end0-_Z21__device_stub__gpuAddPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $216, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 leaq 32(%rsp), %rdi callq hipStreamCreate leaq 24(%rsp), %rdi callq hipStreamCreate leaq 80(%rsp), %rdi callq hipEventCreate leaq 48(%rsp), %rdi callq hipEventCreate movq 80(%rsp), %rdi xorl %ebx, %ebx xorl %esi, %esi callq hipEventRecord leaq 16(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc leaq 8(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc leaq 40(%rsp), %rdi movl $8000000, %esi # imm = 0x7A1200 xorl %edx, %edx callq hipHostAlloc movq 16(%rsp), %rax movq 8(%rsp), %rcx .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %ebx, (%rax,%rbx,4) movl %ebx, (%rcx,%rbx,4) incq %rbx cmpq $2000000, %rbx # imm = 0x1E8480 jne .LBB1_1 # 
%bb.2: movabsq $4294967808, %rbx # imm = 0x100000200 leaq 72(%rsp), %rdi movl $4000000, %r14d # imm = 0x3D0900 movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 64(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 56(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 208(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 200(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc leaq 192(%rsp), %rdi movl $4000000, %esi # imm = 0x3D0900 callq hipMalloc movq 72(%rsp), %rdi movq 16(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 208(%rsp), %rdi movq 16(%rsp), %rsi addq %r14, %rsi movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 64(%rsp), %rdi movq 8(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movl $1, %ecx callq hipMemcpyAsync movq 200(%rsp), %rdi addq 8(%rsp), %r14 movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movq %r14, %rsi movl $1, %ecx callq hipMemcpyAsync movq 32(%rsp), %r9 movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 72(%rsp), %rax movq 64(%rsp), %rcx movq 56(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 160(%rsp) leaq 144(%rsp), %rax movq %rax, 168(%rsp) leaq 136(%rsp), %rax movq %rax, 176(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq 24(%rsp), %r9 xorl %r14d, %r14d movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx 
xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq 208(%rsp), %rax movq 200(%rsp), %rcx movq 192(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 160(%rsp) leaq 144(%rsp), %rax movq %rax, 168(%rsp) leaq 136(%rsp), %rax movq %rax, 176(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z6gpuAddPiS_S_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq 40(%rsp), %rdi movq 56(%rsp), %rsi movq 32(%rsp), %r8 movl $4000000, %ebx # imm = 0x3D0900 movl $4000000, %edx # imm = 0x3D0900 movl $2, %ecx callq hipMemcpyAsync addq 40(%rsp), %rbx movq 192(%rsp), %rsi movq 24(%rsp), %r8 movl $4000000, %edx # imm = 0x3D0900 movq %rbx, %rdi movl $2, %ecx callq hipMemcpyAsync callq hipDeviceSynchronize movq 32(%rsp), %rdi callq hipStreamSynchronize movq 24(%rsp), %rdi callq hipStreamSynchronize movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 48(%rsp), %rdi callq hipEventSynchronize movl $0, 160(%rsp) movq 80(%rsp), %rsi movq 48(%rsp), %rdx leaq 160(%rsp), %rdi callq hipEventElapsedTime movss 160(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 48(%rsp), %rdi callq hipEventDestroy movq 80(%rsp), %rdi callq hipEventDestroy movl $.Lstr, %edi callq puts@PLT movq 16(%rsp), %rax movq 8(%rsp), %rcx movl (%rcx), %edi addl (%rax), %edi movq 40(%rsp), %rdx movl $0, %esi cmpl (%rdx), %edi jne .LBB1_11 # %bb.7: # %.lr.ph.preheader xorl %esi, %esi .p2align 4, 0x90 .LBB1_9: # %.lr.ph # =>This Inner Loop Header: Depth=1 cmpq $1999999, %rsi # imm = 0x1E847F je .LBB1_10 # %bb.8: # in Loop: Header=BB1_9 Depth=1 movl 4(%rcx,%rsi,4), %edi 
addl 4(%rax,%rsi,4), %edi leaq 1(%rsi), %r14 cmpl 4(%rdx,%rsi,4), %edi movq %r14, %rsi je .LBB1_9 # %bb.15: # %._crit_edge leaq -1(%r14), %rax cmpq $1999999, %rax # imm = 0x1E847F setae %sil .LBB1_11: # %._crit_edge69 testb %sil, %sil je .LBB1_13 .LBB1_12: movl $.Lstr.1, %edi callq puts@PLT jmp .LBB1_14 .LBB1_10: # %._crit_edge69.loopexit setae %sil movl $-1, %r14d testb %sil, %sil jne .LBB1_12 .LBB1_13: movq 16(%rsp), %rax movslq %r14d, %rsi movq 8(%rsp), %rcx movl (%rcx,%rsi,4), %edx addl (%rax,%rsi,4), %edx movq 40(%rsp), %rax movl (%rax,%rsi,4), %ecx movl $.L.str.3, %edi # kill: def $esi killed $esi killed $rsi xorl %eax, %eax callq printf .LBB1_14: movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipHostFree movq 8(%rsp), %rdi callq hipHostFree movq 40(%rsp), %rdi callq hipHostFree xorl %eax, %eax addq $216, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6gpuAddPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6gpuAddPiS_S_,@object # @_Z6gpuAddPiS_S_ .section .rodata,"a",@progbits .globl _Z6gpuAddPiS_S_ .p2align 3, 0x0 _Z6gpuAddPiS_S_: .quad _Z21__device_stub__gpuAddPiS_S_ .size _Z6gpuAddPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time consumption: %lf\n" .size .L.str, 23 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n" .size .L.str.3, 69 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6gpuAddPiS_S_" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Vector addition on GPU " .size .Lstr, 24 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "GPU has computed Sum Correctly" .size .Lstr.1, 31 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section 
".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__gpuAddPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6gpuAddPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; cudaEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); error = cudaMalloc((void **) &d_B, mem_size_B); error = cudaMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); error = cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); error = cudaMemcpy(d_C_gpu, C_gpu, mem_size_C, cudaMemcpyHostToDevice); // Setup execution parameters for 
parallel dim3 tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = cudaEventCreate(&gpu_start_time); error = cudaEventCreate(&gpu_end_time); error = cudaEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = cudaEventRecord(gpu_end_time, NULL); error = cudaEventSynchronize(gpu_end_time); error = cudaEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = cudaMemcpy(C_gpu, d_C_gpu, mem_size_C, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { 
for (int col = 0; col < p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C_gpu); cudaDeviceReset(); return 0; }
code for sm_80 Function : _Z12compute_tilePiS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ MOV R4, c[0x0][0x180] ; /* 0x0000600000047a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff147224 */ /* 0x000fe200078e00ff */ /*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */ /* 0x000fc60003f06270 */ /*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002600 */ /*0080*/ S2R R13, SR_TID.Y ; /* 0x00000000000d7919 */ /* 0x000ea20000002200 */ /*0090*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fe400078e0200 */ /*00a0*/ IMAD R2, R2, c[0x0][0x4], RZ ; /* 0x0000010002027a24 */ /* 0x002fe400078e02ff */ /*00b0*/ IMAD R3, R13, c[0x0][0x180], R3 ; /* 0x000060000d037a24 */ /* 0x004fe200078e0203 */ /*00c0*/ @!P0 BRA 0xe20 ; /* 0x00000d5000008947 */ /* 0x000fea0003800000 */ /*00d0*/ MOV R12, c[0x0][0x0] ; /* 0x00000000000c7a02 */ /* 0x000fe20000000f00 */ /*00e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */ /* 0x000fe200000001ff */ /*00f0*/ SHF.L.U32 R5, R13, 0x7, RZ ; /* 0x000000070d057819 */ /* 0x000fe200000006ff */ /*0100*/ IMAD.IADD R13, R2, 0x1, R13 ; /* 0x00000001020d7824 */ /* 0x000fe200078e020d */ /*0110*/ IADD3 R4, R12.reuse, -0x1, RZ ; /* 0xffffffff0c047810 */ /* 0x040fe20007ffe0ff */ /*0120*/ IMAD.MOV.U32 R7, RZ, RZ, R3 ; /* 0x000000ffff077224 */ /* 0x000fe200078e0003 */ /*0130*/ LOP3.LUT R12, R12, 0x3, RZ, 0xc0, !PT ; /* 0x000000030c0c7812 */ /* 0x000fe200078ec0ff */ /*0140*/ IMAD R13, R13, c[0x0][0x17c], R0 ; /* 0x00005f000d0d7a24 */ /* 0x000fe200078e0200 */ /*0150*/ 
ISETP.GE.U32.AND P2, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fc40003f46070 */ /*0160*/ LEA R4, R0.reuse, 0x1100, 0x2 ; /* 0x0000110000047811 */ /* 0x040fe400078e10ff */ /*0170*/ MOV R20, RZ ; /* 0x000000ff00147202 */ /* 0x000fe40000000f00 */ /*0180*/ LEA R14, R0, R5, 0x2 ; /* 0x00000005000e7211 */ /* 0x000fe400078e10ff */ /*0190*/ IADD3 R15, R5, 0x8, RZ ; /* 0x00000008050f7810 */ /* 0x000fe40007ffe0ff */ /*01a0*/ IADD3 R16, -R12, c[0x0][0x0], RZ ; /* 0x000000000c107a10 */ /* 0x000fe40007ffe1ff */ /*01b0*/ MOV R8, 0x4 ; /* 0x0000000400087802 */ /* 0x000fca0000000f00 */ /*01c0*/ IMAD.WIDE R10, R13, R8, c[0x0][0x160] ; /* 0x000058000d0a7625 */ /* 0x000fc800078e0208 */ /*01d0*/ IMAD.WIDE R8, R7, R8, c[0x0][0x168] ; /* 0x00005a0007087625 */ /* 0x000fe400078e0208 */ /*01e0*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */ /* 0x000ee2000c1e1900 */ /*0200*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe40003f05270 */ /*0210*/ IADD3 R6, R6, c[0x0][0x0], RZ ; /* 0x0000000006067a10 */ /* 0x000fc80007ffe0ff */ /*0220*/ ISETP.GE.AND P1, PT, R6, c[0x0][0x180], PT ; /* 0x0000600006007a0c */ /* 0x000fe20003f26270 */ /*0230*/ I2F R17, R10 ; /* 0x0000000a00117306 */ /* 0x004e300000201400 */ /*0240*/ I2F R19, R8 ; /* 0x0000000800137306 */ /* 0x00ae620000201400 */ /*0250*/ STS [R14], R17 ; /* 0x000000110e007388 */ /* 0x0011e80000000800 */ /*0260*/ STS [R14+0x1000], R19 ; /* 0x001000130e007388 */ /* 0x0021e80000000800 */ /*0270*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0280*/ @!P0 BRA 0xdd0 ; /* 0x00000b4000008947 */ /* 0x000fea0003800000 */ /*0290*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x001fe20008000000 */ /*02a0*/ @!P2 BRA 0xc80 ; /* 0x000009d00000a947 */ /* 0x000fea0003800000 */ /*02b0*/ ISETP.GT.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */ /* 0x000fe20003f04270 */ /*02c0*/ UMOV UR4, URZ ; /* 
0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*02d0*/ IMAD.MOV.U32 R19, RZ, RZ, R15 ; /* 0x000000ffff137224 */ /* 0x000fe200078e000f */ /*02e0*/ MOV R18, R4 ; /* 0x0000000400127202 */ /* 0x000fc40000000f00 */ /*02f0*/ MOV R17, R16 ; /* 0x0000001000117202 */ /* 0x000fd00000000f00 */ /*0300*/ @!P0 BRA 0xb00 ; /* 0x000007f000008947 */ /* 0x000fea0003800000 */ /*0310*/ ISETP.GT.AND P3, PT, R17, 0xc, PT ; /* 0x0000000c1100780c */ /* 0x000fe40003f64270 */ /*0320*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0330*/ @!P3 BRA 0x830 ; /* 0x000004f00000b947 */ /* 0x000fea0003800000 */ /*0340*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0350*/ LDS R9, [R18+-0x100] ; /* 0xffff000012097984 */ /* 0x000fe20000000800 */ /*0360*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0370*/ IADD3 R17, R17, -0x10, RZ ; /* 0xfffffff011117810 */ /* 0x000fe20007ffe0ff */ /*0380*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe2000fffe03f */ /*0390*/ LDS.64 R10, [R19+-0x8] ; /* 0xfffff800130a7984 */ /* 0x000e240000000a00 */ /*03a0*/ ISETP.GT.AND P3, PT, R17, 0xc, PT ; /* 0x0000000c1100780c */ /* 0x000fe40003f64270 */ /*03b0*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*03c0*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*03d0*/ FFMA R10, R9, R10, R20 ; /* 0x0000000a090a7223 */ /* 0x001fc60000000014 */ /*03e0*/ LDS.64 R8, [R19] ; /* 0x0000000013087984 */ /* 0x000e260000000a00 */ /*03f0*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000eb0000020f100 */ /*0400*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x004e640000201400 */ /*0410*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x002fc40000000015 */ /*0420*/ LDS R22, [R18+0x80] ; /* 0x0000800012167984 */ /* 0x000e680000000800 */ /*0430*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000eb0000020f100 */ /*0440*/ I2F R11, R21 ; /* 
0x00000015000b7306 */ /* 0x004e240000201400 */ /*0450*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x001fc4000000000b */ /*0460*/ LDS R23, [R18+0x100] ; /* 0x0001000012177984 */ /* 0x000fe80000000800 */ /*0470*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f100 */ /*0480*/ LDS.64 R10, [R19+0x8] ; /* 0x00000800130a7984 */ /* 0x000eae0000000a00 */ /*0490*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*04a0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fc40000000014 */ /*04b0*/ LDS R22, [R18+0x180] ; /* 0x0001800012167984 */ /* 0x000e280000000800 */ /*04c0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*04d0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*04e0*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*04f0*/ LDS R23, [R18+0x200] ; /* 0x0002000012177984 */ /* 0x000fe80000000800 */ /*0500*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0510*/ LDS.64 R8, [R19+0x10] ; /* 0x0000100013087984 */ /* 0x000eae0000000a00 */ /*0520*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0530*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x001fc40000000015 */ /*0540*/ LDS R22, [R18+0x280] ; /* 0x0002800012167984 */ /* 0x000e280000000800 */ /*0550*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000e70000020f100 */ /*0560*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x002ea40000201400 */ /*0570*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x004fc4000000000b */ /*0580*/ LDS R23, [R18+0x300] ; /* 0x0003000012177984 */ /* 0x000fe80000000800 */ /*0590*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e62000020f100 */ /*05a0*/ LDS.64 R10, [R19+0x18] ; /* 0x00001800130a7984 */ /* 0x000eae0000000a00 */ /*05b0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x002e240000201400 */ /*05c0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x001fc40000000014 */ 
/*05d0*/ LDS R22, [R18+0x380] ; /* 0x0003800012167984 */ /* 0x000e280000000800 */ /*05e0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*05f0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*0600*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*0610*/ LDS R23, [R18+0x400] ; /* 0x0004000012177984 */ /* 0x000fe80000000800 */ /*0620*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0630*/ LDS.64 R8, [R19+0x20] ; /* 0x0000200013087984 */ /* 0x000eae0000000a00 */ /*0640*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0650*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x001fc40000000015 */ /*0660*/ LDS R22, [R18+0x480] ; /* 0x0004800012167984 */ /* 0x000e280000000800 */ /*0670*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000e70000020f100 */ /*0680*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x002ea40000201400 */ /*0690*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x004fc4000000000b */ /*06a0*/ LDS R23, [R18+0x500] ; /* 0x0005000012177984 */ /* 0x000fe80000000800 */ /*06b0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e62000020f100 */ /*06c0*/ LDS.64 R10, [R19+0x28] ; /* 0x00002800130a7984 */ /* 0x000eae0000000a00 */ /*06d0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x002e240000201400 */ /*06e0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x001fc40000000014 */ /*06f0*/ LDS R22, [R18+0x580] ; /* 0x0005800012167984 */ /* 0x000e280000000800 */ /*0700*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*0710*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*0720*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*0730*/ LDS R23, [R18+0x600] ; /* 0x0006000012177984 */ /* 0x000fe80000000800 */ /*0740*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0750*/ LDS.64 R8, [R19+0x30] ; /* 
0x0000300013087984 */ /* 0x0004e40000000a00 */ /*0760*/ IADD3 R19, R19, 0x40, RZ ; /* 0x0000004013137810 */ /* 0x004fca0007ffe0ff */ /*0770*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0780*/ FFMA R11, R22, R11, R21 ; /* 0x0000000b160b7223 */ /* 0x001fe40000000015 */ /*0790*/ LDS R22, [R18+0x680] ; /* 0x0006800012167984 */ /* 0x0000680000000800 */ /*07a0*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f100 */ /*07b0*/ IADD3 R18, R18, 0x800, RZ ; /* 0x0000080012127810 */ /* 0x001fce0007ffe0ff */ /*07c0*/ I2F R21, R11 ; /* 0x0000000b00157306 */ /* 0x004ee40000201400 */ /*07d0*/ FFMA R8, R23, R8, R21 ; /* 0x0000000817087223 */ /* 0x008fcc0000000015 */ /*07e0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e30000020f100 */ /*07f0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*0800*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fcc0000000014 */ /*0810*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e22000020f100 */ /*0820*/ @P3 BRA 0x350 ; /* 0xfffffb2000003947 */ /* 0x000fea000383ffff */ /*0830*/ ISETP.GT.AND P3, PT, R17, 0x4, PT ; /* 0x000000041100780c */ /* 0x000fda0003f64270 */ /*0840*/ @!P3 BRA 0xae0 ; /* 0x000002900000b947 */ /* 0x000fea0003800000 */ /*0850*/ LDS R9, [R18+-0x100] ; /* 0xffff000012097984 */ /* 0x000fe20000000800 */ /*0860*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0880*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0890*/ LDS.64 R10, [R19+-0x8] ; /* 0xfffff800130a7984 */ /* 0x000e220000000a00 */ /*08a0*/ IADD3 R17, R17, -0x8, RZ ; /* 0xfffffff811117810 */ /* 0x000fc60007ffe0ff */ /*08b0*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*08c0*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*08d0*/ FFMA R10, R9, R10, R20 ; /* 0x0000000a090a7223 
*/ /* 0x001fc60000000014 */ /*08e0*/ LDS.64 R8, [R19] ; /* 0x0000000013087984 */ /* 0x000e260000000a00 */ /*08f0*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000eb0000020f100 */ /*0900*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x004e640000201400 */ /*0910*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x002fc40000000015 */ /*0920*/ LDS R22, [R18+0x80] ; /* 0x0000800012167984 */ /* 0x000e680000000800 */ /*0930*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000eb0000020f100 */ /*0940*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x004e240000201400 */ /*0950*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x001fc4000000000b */ /*0960*/ LDS R23, [R18+0x100] ; /* 0x0001000012177984 */ /* 0x000fe80000000800 */ /*0970*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f100 */ /*0980*/ LDS.64 R10, [R19+0x8] ; /* 0x00000800130a7984 */ /* 0x000eae0000000a00 */ /*0990*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*09a0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fc40000000014 */ /*09b0*/ LDS R22, [R18+0x180] ; /* 0x0001800012167984 */ /* 0x000e280000000800 */ /*09c0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*09d0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*09e0*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*09f0*/ LDS R23, [R18+0x200] ; /* 0x0002000012177984 */ /* 0x000fe80000000800 */ /*0a00*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0a10*/ LDS.64 R8, [R19+0x10] ; /* 0x0000100013087984 */ /* 0x0004e40000000a00 */ /*0a20*/ IADD3 R19, R19, 0x20, RZ ; /* 0x0000002013137810 */ /* 0x004fca0007ffe0ff */ /*0a30*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0a40*/ FFMA R11, R22, R11, R21 ; /* 0x0000000b160b7223 */ /* 0x001fe40000000015 */ /*0a50*/ LDS R22, [R18+0x280] ; /* 0x0002800012167984 */ /* 0x0000680000000800 */ /*0a60*/ F2I.TRUNC.NTZ 
R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f100 */ /*0a70*/ IADD3 R18, R18, 0x400, RZ ; /* 0x0000040012127810 */ /* 0x001fce0007ffe0ff */ /*0a80*/ I2F R21, R11 ; /* 0x0000000b00157306 */ /* 0x004ee40000201400 */ /*0a90*/ FFMA R8, R23, R8, R21 ; /* 0x0000000817087223 */ /* 0x008fcc0000000015 */ /*0aa0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e30000020f100 */ /*0ab0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*0ac0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fcc0000000014 */ /*0ad0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e24000020f100 */ /*0ae0*/ ISETP.NE.OR P0, PT, R17, RZ, P0 ; /* 0x000000ff1100720c */ /* 0x000fda0000705670 */ /*0af0*/ @!P0 BRA 0xc80 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*0b00*/ LDS R11, [R18+-0x100] ; /* 0xffff0000120b7984 */ /* 0x000fe20000000800 */ /*0b10*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0b20*/ IADD3 R17, R17, -0x4, RZ ; /* 0xfffffffc11117810 */ /* 0x000fe20007ffe0ff */ /*0b30*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe2000fffe03f */ /*0b40*/ LDS.64 R8, [R19+-0x8] ; /* 0xfffff80013087984 */ /* 0x000e240000000a00 */ /*0b50*/ ISETP.NE.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fe40003f05270 */ /*0b60*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*0b70*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*0b80*/ FFMA R8, R11, R8, R20 ; /* 0x000000080b087223 */ /* 0x001fc60000000014 */ /*0b90*/ LDS.64 R10, [R19] ; /* 0x00000000130a7984 */ /* 0x0000a60000000a00 */ /*0ba0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000ee2000020f100 */ /*0bb0*/ IADD3 R19, R19, 0x10, RZ ; /* 0x0000001013137810 */ /* 0x001fce0007ffe0ff */ /*0bc0*/ I2F R21, R8 ; /* 0x0000000800157306 */ /* 0x008e640000201400 */ /*0bd0*/ FFMA R9, R22, R9, R21 ; /* 0x0000000916097223 */ /* 0x002fe40000000015 */ /*0be0*/ LDS R22, [R18+0x80] ; /* 
0x0000800012167984 */ /* 0x0000680000000800 */ /*0bf0*/ F2I.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */ /* 0x000ee2000020f100 */ /*0c00*/ IADD3 R18, R18, 0x200, RZ ; /* 0x0000020012127810 */ /* 0x001fce0007ffe0ff */ /*0c10*/ I2F R21, R9 ; /* 0x0000000900157306 */ /* 0x008ea40000201400 */ /*0c20*/ FFMA R10, R23, R10, R21 ; /* 0x0000000a170a7223 */ /* 0x004fcc0000000015 */ /*0c30*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e30000020f100 */ /*0c40*/ I2F R20, R10 ; /* 0x0000000a00147306 */ /* 0x001e640000201400 */ /*0c50*/ FFMA R20, R22, R11, R20 ; /* 0x0000000b16147223 */ /* 0x002fcc0000000014 */ /*0c60*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e24000020f100 */ /*0c70*/ @P0 BRA 0xb00 ; /* 0xfffffe8000000947 */ /* 0x001fea000383ffff */ /*0c80*/ ISETP.NE.AND P0, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fda0003f05270 */ /*0c90*/ @!P0 BRA 0xdd0 ; /* 0x0000013000008947 */ /* 0x000fea0003800000 */ /*0ca0*/ IMAD.U32 R8, RZ, RZ, UR4 ; /* 0x00000004ff087e24 */ /* 0x000fe2000f8e00ff */ /*0cb0*/ ULEA UR5, UR4, 0x1000, 0x7 ; /* 0x0000100004057891 */ /* 0x000fe2000f8e383f */ /*0cc0*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0cd0*/ ISETP.NE.AND P0, PT, R12, 0x1, PT ; /* 0x000000010c00780c */ /* 0x000fe40003f05270 */ /*0ce0*/ LEA R9, R8, R5, 0x2 ; /* 0x0000000508097211 */ /* 0x000fca00078e10ff */ /*0cf0*/ LDS R17, [R0.X4+UR5] ; /* 0x0000000500117984 */ /* 0x000fe80008004800 */ /*0d00*/ LDS.128 R8, [R9] ; /* 0x0000000009087984 */ /* 0x000e240000000c00 */ /*0d10*/ FFMA R8, R17, R8, R20 ; /* 0x0000000811087223 */ /* 0x001fc80000000014 */ /*0d20*/ F2I.TRUNC.NTZ R20, R8 ; /* 0x0000000800147305 */ /* 0x000062000020f100 */ /*0d30*/ @!P0 BRA 0xdd0 ; /* 0x0000009000008947 */ /* 0x000fea0003800000 */ /*0d40*/ LDS R8, [R0.X4+UR5+0x80] ; /* 0x0000800500087984 */ /* 0x001e220008004800 */ /*0d50*/ ISETP.NE.AND P0, PT, R12, 0x2, PT ; /* 0x000000020c00780c */ /* 0x000fe20003f05270 */ /*0d60*/ I2F R20, R20 ; /* 0x0000001400147306 
*/ /* 0x002e180000201400 */ /*0d70*/ @P0 LDS R11, [R0.X4+UR5+0x100] ; /* 0x00010005000b0984 */ /* 0x000e620008004800 */ /*0d80*/ FFMA R8, R8, R9, R20 ; /* 0x0000000908087223 */ /* 0x001fc80000000014 */ /*0d90*/ F2I.TRUNC.NTZ R20, R8 ; /* 0x0000000800147305 */ /* 0x000e30000020f100 */ /*0da0*/ @P0 I2F R9, R20 ; /* 0x0000001400090306 */ /* 0x001e640000201400 */ /*0db0*/ @P0 FFMA R10, R11, R10, R9 ; /* 0x0000000a0b0a0223 */ /* 0x002fcc0000000009 */ /*0dc0*/ @P0 F2I.TRUNC.NTZ R20, R10 ; /* 0x0000000a00140305 */ /* 0x000064000020f100 */ /*0dd0*/ MOV R8, c[0x0][0x0] ; /* 0x0000000000087a02 */ /* 0x001fe20000000f00 */ /*0de0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0df0*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */ /* 0x000fc60007ffe0ff */ /*0e00*/ IMAD R7, R8, c[0x0][0x180], R7 ; /* 0x0000600008077a24 */ /* 0x000fe400078e0207 */ /*0e10*/ @!P1 BRA 0x1b0 ; /* 0xfffff39000009947 */ /* 0x000fea000383ffff */ /*0e20*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0e30*/ IMAD R2, R2, c[0x0][0x180], R3 ; /* 0x0000600002027a24 */ /* 0x000fd200078e0203 */ /*0e40*/ IMAD.WIDE R2, R2, R5, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0205 */ /*0e50*/ STG.E [R2.64], R20 ; /* 0x0000001402007986 */ /* 0x002fe2000c101906 */ /*0e60*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0e70*/ BRA 0xe70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0e80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0e90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ea0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0eb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ec0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ed0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ee0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ef0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; cudaEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); error = cudaMalloc((void **) &d_B, mem_size_B); error = cudaMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); error = cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); error = cudaMemcpy(d_C_gpu, C_gpu, mem_size_C, cudaMemcpyHostToDevice); // Setup execution parameters for 
parallel dim3 tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = cudaEventCreate(&gpu_start_time); error = cudaEventCreate(&gpu_end_time); error = cudaEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = cudaEventRecord(gpu_end_time, NULL); error = cudaEventSynchronize(gpu_end_time); error = cudaEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = cudaMemcpy(C_gpu, d_C_gpu, mem_size_C, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { 
for (int col = 0; col < p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C_gpu); cudaDeviceReset(); return 0; }
.file "tmpxft_0006cacd_00000000-6_final_Eric_Sheeder.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "CPU time was %f seconds\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $168, %rsp .cfi_def_cfa_offset 224 movl %edi, %ebx movq %rsi, %rbp movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT cmpl $4, %ebx jne .L28 movq 8(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r13 movl %eax, 20(%rsp) movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r15 movq %rax, 88(%rsp) movl %eax, %ebx movq 24(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r14 movq %rax, 80(%rsp) movl %eax, 16(%rsp) movq %r13, 24(%rsp) movl %r13d, %r12d imull %r15d, %r12d movslq %r12d, %r13 leal 0(,%r12,4), %eax movq %rax, (%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r15 movl %ebx, %eax imull %r14d, %eax movl %eax, 32(%rsp) movslq %eax, %rsi movq %rsi, 40(%rsp) leal 0(,%rax,4), %eax movq %rax, 48(%rsp) movq %rax, %rdi call 
malloc@PLT movq %rax, 8(%rsp) movl 24(%rsp), %eax imull %r14d, %eax movl %eax, 56(%rsp) movslq %eax, %rsi movq %rsi, 72(%rsp) leal 0(,%rax,4), %ebp movq %rbp, 64(%rsp) movq %rbp, %rdi call malloc@PLT movq %rax, %r14 movq %rbp, %rdi call malloc@PLT movq %rax, %rbp testl %r12d, %r12d jle .L5 movq %r15, %r12 leaq (%r15,%r13,4), %r13 .L6: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax addl $1, %eax movl %eax, (%r12) addq $4, %r12 cmpq %r13, %r12 jne .L6 .L5: cmpl $0, 32(%rsp) jle .L7 movq 8(%rsp), %rax movq %rax, %r12 movq 40(%rsp), %rsi leaq (%rax,%rsi,4), %r13 .L8: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax addl $1, %eax movl %eax, (%r12) addq $4, %r12 cmpq %r13, %r12 jne .L8 .L7: cmpl $0, 56(%rsp) jle .L9 movq 72(%rsp), %rdx salq $2, %rdx movl $0, %eax .L10: movl $0, (%r14,%rax) movl $0, 0(%rbp,%rax) addq $4, %rax cmpq %rdx, %rax jne .L10 .L9: leaq 128(%rsp), %rdi movq (%rsp), %rsi call cudaMalloc@PLT leaq 136(%rsp), %rdi movq 48(%rsp), %r12 movq %r12, %rsi call cudaMalloc@PLT leaq 144(%rsp), %rdi movq 64(%rsp), %r13 movq %r13, %rsi call cudaMalloc@PLT movl $1, %ecx movq (%rsp), %rdx movq %r15, %rsi movq 128(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r12, %rdx movq 8(%rsp), %rsi movq 136(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r13, %rdx movq %rbp, %rsi movq 144(%rsp), %rdi call cudaMemcpy@PLT leaq 112(%rsp), %rdi call cudaEventCreate@PLT leaq 120(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 120(%rsp), %rdi call cudaEventRecord@PLT movq 120(%rsp), %rdi call cudaEventSynchronize@PLT leaq 108(%rsp), %rdi movq 120(%rsp), %rdx movq 112(%rsp), %rsi call cudaEventElapsedTime@PLT call clock@PLT movq %rax, 72(%rsp) 
cmpl $0, 24(%rsp) jle .L11 movq 80(%rsp), %rdi movl %edi, %esi movq 88(%rsp), %rdx movl %edx, 24(%rsp) movslq %edi, %r8 salq $2, %r8 movl $0, %r13d movl $0, %ecx movl $0, %eax leal -1(%rdi), %r12d leal -1(%rdx), %edi movq %rdi, 48(%rsp) leaq 4(%r15), %rdi movq %rdi, 56(%rsp) movq %r15, 32(%rsp) movq %r14, 40(%rsp) movq %rbp, 80(%rsp) jmp .L12 .L28: leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L13: movl (%rax), %edx imull (%rdi), %edx addl %edx, %r9d addq $4, %rax addq %r8, %rdi cmpq %r10, %rax jne .L13 .L15: movl %r9d, (%r14,%r11,4) leaq 1(%r11), %rax addq $4, %rbp cmpq %r12, %r11 je .L24 movq %rax, %r11 .L16: movq %rbp, %rdi movq %r15, %rax movl $0, %r9d testl %ebx, %ebx jg .L13 jmp .L15 .L24: movl (%rsp), %eax .L14: addl $1, %eax addl %esi, %ecx movl 24(%rsp), %edi addl %edi, %r13d movl 20(%rsp), %edi cmpl %edi, %eax je .L26 .L12: cmpl $0, 16(%rsp) jle .L14 movq 8(%rsp), %rbp movslq %r13d, %r9 movq 32(%rsp), %rdi leaq (%rdi,%r9,4), %r15 movq 48(%rsp), %rdi addq %rdi, %r9 movq 56(%rsp), %rdi leaq (%rdi,%r9,4), %r10 movslq %ecx, %r9 movq 40(%rsp), %rdi leaq (%rdi,%r9,4), %r14 movl $0, %r11d movl %eax, (%rsp) jmp .L16 .L26: movq 32(%rsp), %r15 movq 40(%rsp), %r14 movq 80(%rsp), %rbp .L11: call clock@PLT movq 72(%rsp), %rsi subq %rsi, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 movq %xmm0, %rbx movl $2, %ecx movq 64(%rsp), %rdx movq 144(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT call cudaDeviceSynchronize@PLT movq %rbx, %xmm0 leaq .LC2(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq %r15, %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %r14, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq 128(%rsp), %rdi call cudaFree@PLT movq 136(%rsp), %rdi call cudaFree@PLT movq 144(%rsp), %rdi call cudaFree@PLT call cudaDeviceReset@PLT movq 152(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq 
%rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .globl _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii .type _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii, @function _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii: .LFB2082: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L34 .L30: movq 168(%rsp), %rax subq %fs:40, %rax jne .L35 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z12compute_tilePiS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L30 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii, .-_Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii .globl _Z12compute_tilePiS_S_iii .type _Z12compute_tilePiS_S_iii, @function _Z12compute_tilePiS_S_iii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 
call _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z12compute_tilePiS_S_iii, .-_Z12compute_tilePiS_S_iii .section .rodata.str1.1 .LC3: .string "_Z12compute_tilePiS_S_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z12compute_tilePiS_S_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; cudaEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); error = cudaMalloc((void **) &d_B, mem_size_B); error = cudaMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); error = cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); error = cudaMemcpy(d_C_gpu, C_gpu, mem_size_C, cudaMemcpyHostToDevice); // Setup execution parameters for 
parallel dim3 tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = cudaEventCreate(&gpu_start_time); error = cudaEventCreate(&gpu_end_time); error = cudaEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = cudaEventRecord(gpu_end_time, NULL); error = cudaEventSynchronize(gpu_end_time); error = cudaEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = cudaMemcpy(C_gpu, d_C_gpu, mem_size_C, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { 
for (int col = 0; col < p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C_gpu); cudaDeviceReset(); return 0; }
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; hipEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); error = hipMalloc((void **) &d_B, mem_size_B); error = hipMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); error = hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); error = hipMemcpy(d_C_gpu, C_gpu, mem_size_C, hipMemcpyHostToDevice); // Setup execution parameters for parallel dim3 
tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = hipEventCreate(&gpu_start_time); error = hipEventCreate(&gpu_end_time); error = hipEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = hipEventRecord(gpu_end_time, NULL); error = hipEventSynchronize(gpu_end_time); error = hipEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = hipMemcpy(C_gpu, d_C_gpu, mem_size_C, hipMemcpyDeviceToHost); hipDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < 
p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); hipFree(d_A); hipFree(d_B); hipFree(d_C_gpu); hipDeviceReset(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; hipEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); error = hipMalloc((void **) &d_B, mem_size_B); error = hipMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); error = hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); error = hipMemcpy(d_C_gpu, C_gpu, mem_size_C, hipMemcpyHostToDevice); // Setup execution parameters for parallel dim3 
tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = hipEventCreate(&gpu_start_time); error = hipEventCreate(&gpu_end_time); error = hipEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = hipEventRecord(gpu_end_time, NULL); error = hipEventSynchronize(gpu_end_time); error = hipEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = hipMemcpy(C_gpu, d_C_gpu, mem_size_C, hipMemcpyDeviceToHost); hipDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < 
p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); hipFree(d_A); hipFree(d_B); hipFree(d_C_gpu); hipDeviceReset(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12compute_tilePiS_S_iii .globl _Z12compute_tilePiS_S_iii .p2align 8 .type _Z12compute_tilePiS_S_iii,@function _Z12compute_tilePiS_S_iii: s_clause 0x1 s_load_b32 s3, s[0:1], 0x20 s_load_b32 s2, s[0:1], 0x34 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) v_mul_lo_u32 v5, v2, s3 s_and_b32 s9, s2, 0xffff s_lshr_b32 s8, s2, 16 s_cmp_lt_i32 s3, 1 s_mul_i32 s14, s14, s9 s_cbranch_scc1 .LBB0_5 s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[6:7], null, s15, s8, v[2:3] v_lshlrev_b32_e32 v9, 2, v0 v_add3_u32 v1, v5, v0, s14 s_cmp_lg_u32 s9, 0 v_lshlrev_b32_e32 v7, 7, v2 s_cselect_b32 s10, -1, 0 s_mul_i32 s11, s9, s3 v_cndmask_b32_e64 v2, 0, 1, s10 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[3:4], null, v6, s2, v[0:1] v_mov_b32_e32 v6, 0 v_or_b32_e32 v8, 0x1000, v9 v_add_nc_u32_e32 v9, v7, v9 v_cmp_ne_u32_e64 s2, 1, v2 s_delay_alu instid0(VALU_DEP_3) v_add_nc_u32_e32 v10, v8, v7 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: v_add_nc_u32_e32 v3, s9, v3 v_add_nc_u32_e32 v1, s11, v1 s_add_i32 s10, s10, s9 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s10, s3 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_6 .LBB0_3: v_ashrrev_i32_e32 v4, 31, v3 v_ashrrev_i32_e32 v2, 31, v1 s_mov_b32 s12, s9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[11:12], 2, v[3:4] v_lshlrev_b64 v[13:14], 2, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v11, vcc_lo, s4, v11 v_add_co_ci_u32_e32 v12, vcc_lo, s5, v12, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v13, vcc_lo, s6, v13 v_add_co_ci_u32_e32 v14, vcc_lo, s7, v14, vcc_lo s_and_b32 vcc_lo, exec_lo, s2 global_load_b32 v2, v[11:12], off global_load_b32 v4, v[13:14], off s_waitcnt vmcnt(1) v_cvt_f32_i32_e32 v11, v2 s_waitcnt 
vmcnt(0) v_cvt_f32_i32_e32 v12, v4 v_mov_b32_e32 v2, v8 v_mov_b32_e32 v4, v7 ds_store_b32 v9, v11 ds_store_b32 v10, v12 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_vccnz .LBB0_2 .LBB0_4: ds_load_b32 v11, v4 ds_load_b32 v12, v2 v_cvt_f32_i32_e32 v6, v6 v_add_nc_u32_e32 v4, 4, v4 v_add_nc_u32_e32 v2, 0x80, v2 s_add_i32 s12, s12, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s12, 0 s_waitcnt lgkmcnt(0) v_fmac_f32_e32 v6, v11, v12 v_cvt_i32_f32_e32 v6, v6 s_cbranch_scc0 .LBB0_4 s_branch .LBB0_2 .LBB0_5: v_mov_b32_e32 v6, 0 .LBB0_6: s_set_inst_prefetch_distance 0x2 v_add_nc_u32_e32 v0, v5, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_mul_i32 s2, s15, s8 s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s2, s2, s3 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_add3_u32 v0, s14, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v6, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12compute_tilePiS_S_iii .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12compute_tilePiS_S_iii, .Lfunc_end0-_Z12compute_tilePiS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12compute_tilePiS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12compute_tilePiS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <cstdlib> #include <time.h> #include <stdlib.h> #include <math.h> /* Authors: Eric Sheeder, Gokul Natesan, Jacob Hollister Parallel Computing Final Project This code generates 2 large matrices and multiplies them, once on the GPU and once on the CPU It expects 3 variables on the command line, n, m, and p, where n and m are the dimensions of A and m and p are the dimensions of B This code currently only works accurately when n, m, and p are all multiples of 32 Algorithm was learned and taken from this paper: https://webs.um.es/jmgarcia/miwiki/lib/exe/fetch.php?id=pubs&cache=cache&media=parco09.pdf */ // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> //This function computes the final values for a single tile in the output matrix (C) __global__ void compute_tile(int *d_A, int *d_B, int *d_C_gpu, int n, int m, int p) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; //Chunk of 32x32 values loaded from A and B __shared__ float A[32][32]; __shared__ float B[32][32]; //Offset into A and B for each thread so it knows which piece of data to load into the shared arrays int indexA = by * (m*blockDim.y) + ty * m + tx; int indexB = bx*blockDim.x + ty*p + tx; //Each thread keeps track of its own sum in this variable int sum = 0; //Run through multiple tiles in A and B to compute our values for our tile in C for (int i = 0; i < p; i+=blockDim.x) { //Have each thread load a value into A and B A[ty][tx] = d_A[indexA]; B[ty][tx] = d_B[indexB]; //Synch threads so all threads wait until all data is loaded before we start calculating __syncthreads(); //Have each thread run through the section of A and B we are at, doing 32 multiplications and summing them for (int j = 0; j < blockDim.x; j++) { sum += A[ty][j] * B[j][tx]; } //Synch threads again so we know each thread is ready to move on to the next part of A and 
Block __syncthreads(); indexA += blockDim.x; indexB += p*blockDim.x; } //Each thread should now have a complete value for its part in C, so figure out where it should go and store it int indexC = bx * blockDim.x + by * (p * blockDim.y) + p * ty + tx; d_C_gpu[indexC] = sum; } int main (int argc, char *argv[]) { int *A, *B, *C_cpu, *C_gpu; // matrices int n, m, p; // dimensions of matrices srand(time(NULL)); //random numbers each time clock_t cpu_start_time, cpu_end_time; hipEvent_t gpu_start_time, gpu_end_time; double cpu_total_time; float gpu_total_time; //Make sure user puts in right parameters if (argc !=4) { printf("Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)"); exit(1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for row_ptr, indices, data, b and c. unsigned int mem_size_A = (n*m)*sizeof(int); A = (int *) malloc (mem_size_A); unsigned int mem_size_B = (m*p)*sizeof(int); B = (int *) malloc (mem_size_B); unsigned int mem_size_C = (n*p)*sizeof(int); C_cpu = (int *) malloc (mem_size_C); C_gpu = (int *) malloc (mem_size_C); // Fill A with randomly generated data for (int i=0; i<n*m; i++) { int someInt = 1 + rand() % 10; A[i] = someInt; } // Fill B with randomly generated data for (int i=0; i<m*p; i++) { int someInt = 1 + rand() % 10; B[i] = someInt; } // Fill C with 0s for (int i=0; i<n*p; i++) { C_cpu[i] = 0; C_gpu[i] = 0; } // Allocate device memory int *d_A, *d_B, *d_C_gpu; hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); error = hipMalloc((void **) &d_B, mem_size_B); error = hipMalloc((void **) &d_C_gpu, mem_size_C); // copy host memory to device error = hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); error = hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); error = hipMemcpy(d_C_gpu, C_gpu, mem_size_C, hipMemcpyHostToDevice); // Setup execution parameters for parallel dim3 
tile_size(32, 32); dim3 num_blocks(n/tile_size.x + (n % tile_size.x != 0), p/tile_size.y + (p % tile_size.y != 0)); //Execute parallel code error = hipEventCreate(&gpu_start_time); error = hipEventCreate(&gpu_end_time); error = hipEventRecord(gpu_start_time, NULL); //compute_tile<<<num_blocks, tile_size>>>(d_A, d_B, d_C_gpu, n, m, p); error = hipEventRecord(gpu_end_time, NULL); error = hipEventSynchronize(gpu_end_time); error = hipEventElapsedTime(&gpu_total_time, gpu_start_time, gpu_end_time); cpu_start_time = clock(); // MAIN COMPUTATION, SEQUENTIAL VERSION for (int row=0; row < n; row++) { for (int col = 0; col < p; col++) { int sum = 0; for (int i = 0; i < m; i++) { sum += A[row*m + i] * B[i*p + col]; } C_cpu[row*p + col] = sum; } } cpu_end_time = clock(); cpu_total_time = ((double) (cpu_end_time - cpu_start_time)) / CLOCKS_PER_SEC; //Copy result from device to host error = hipMemcpy(C_gpu, d_C_gpu, mem_size_C, hipMemcpyDeviceToHost); hipDeviceSynchronize(); //Print values in our 2 c vectors to output.txt //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm /*FILE *fpout; fpout = fopen("output.txt", "w+"); fprintf(fpout, "index\tCPU\tGPU\tDifference\n"); for (int i = 0; i < n; i++) { int difference = A_cpu[i] - A_gpu[i]; fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference); } fclose(fpout);*/ //Print out matrix A /*printf("Matrix A:\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < m; col++) { printf("%i ", A[row*n + col]); } printf("\n"); }*/ //Print out matrix B /*printf("Matrix B:\n"); for (int row = 0; row < m; row++) { for (int col = 0; col < p; col++) { printf("%i ", B[row*n + col]); } printf("\n"); }*/ //Print out matrix C (CPU) /*printf("Matrix C (CPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { printf("%i ", C_cpu[row*n + col]); } printf("\n"); }*/ //Print out matrix C (GPU) /*printf("Matrix C (GPU):\n"); for (int row = 0; row < n; row++) { for (int col = 0; col < 
p; col++) { printf("%i ", C_gpu[row*n + col]); } printf("\n"); }*/ //Find discrepencies /*for (int row = 0; row < n; row++) { for (int col = 0; col < p; col++) { if (C_gpu[row*n + col] != C_cpu[row*n + col]) { printf("Error: C_gpu[%i] = %i, C_cpu[%i] = %i\n", row*n+col, C_gpu[row*n + col], row*n+col, C_cpu[row*n + col]); } } }*/ //Print performance time printf("CPU time was %f seconds\n", cpu_total_time); //printf("GPU time was %f milliseconds\n", gpu_total_time); // Clean up memory free(A); free(B); free(C_cpu); free(C_gpu); hipFree(d_A); hipFree(d_B); hipFree(d_C_gpu); hipDeviceReset(); return 0; }
.text .file "final_Eric_Sheeder.hip" .globl _Z27__device_stub__compute_tilePiS_S_iii # -- Begin function _Z27__device_stub__compute_tilePiS_S_iii .p2align 4, 0x90 .type _Z27__device_stub__compute_tilePiS_S_iii,@function _Z27__device_stub__compute_tilePiS_S_iii: # @_Z27__device_stub__compute_tilePiS_S_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z12compute_tilePiS_S_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z27__device_stub__compute_tilePiS_S_iii, .Lfunc_end0-_Z27__device_stub__compute_tilePiS_S_iii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $88, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %rbx movl %edi, %ebp xorl %edi, %edi callq time movl %eax, %edi callq srand cmpl $4, %ebp jne .LBB1_10 # 
%bb.1: movq 8(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r13 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r12 movq 24(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r15 movl %r12d, %ebp imull %r13d, %ebp leal (,%rbp,4), %edi movq %rdi, 8(%rsp) # 8-byte Spill callq malloc movq %rax, %rbx imull %r15d, %r12d leal (,%r12,4), %edi movq %rdi, 56(%rsp) # 8-byte Spill callq malloc movq %rax, %r14 imull %r13d, %r15d leal (,%r15,4), %edi movq %rdi, 64(%rsp) # 8-byte Spill callq malloc movq %rax, 72(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB1_4 # %bb.2: # %.lr.ph.preheader movl %ebp, %r13d xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_3: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx negl %ecx addl %ecx, %eax incl %eax movl %eax, (%rbx,%rbp,4) incq %rbp cmpq %rbp, %r13 jne .LBB1_3 .LBB1_4: # %.preheader93 testl %r12d, %r12d jle .LBB1_7 # %bb.5: # %.lr.ph96.preheader movl %r12d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_6: # %.lr.ph96 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx negl %ecx addl %ecx, %eax incl %eax movl %eax, (%r14,%r13,4) incq %r13 cmpq %r13, %r12 jne .LBB1_6 .LBB1_7: # %.preheader92 testl %r15d, %r15d movq 72(%rsp), %r12 # 8-byte Reload jle .LBB1_9 # %bb.8: # %.lr.ph98.preheader movl %r15d, %edx shlq $2, %rdx movq %r12, %rdi xorl %esi, %esi callq memset@PLT .LBB1_9: # %._crit_edge leaq 40(%rsp), %rdi movq 8(%rsp), %r15 # 8-byte Reload movq %r15, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq 56(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq 64(%rsp), %r13 # 8-byte Reload movq %r13, %rsi callq 
hipMalloc movq 40(%rsp), %rdi movq %rbx, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r14, %rsi movq %rbp, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r12, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy leaq 48(%rsp), %rdi callq hipEventCreate leaq 24(%rsp), %rdi callq hipEventCreate movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 24(%rsp), %rdx leaq 84(%rsp), %rdi callq hipEventElapsedTime callq clock movq %rax, %r15 callq clock subq %r15, %rax cvtsi2sd %rax, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movsd %xmm0, 8(%rsp) # 8-byte Spill movq 16(%rsp), %rsi movq %r12, %rdi movq %r13, %rdx movl $2, %ecx callq hipMemcpy callq hipDeviceSynchronize movl $.L.str.1, %edi movsd 8(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movb $1, %al callq printf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r12, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree callq hipDeviceReset xorl %eax, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB1_10: .cfi_def_cfa_offset 144 movl $.L.str, %edi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups 
%xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12compute_tilePiS_S_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z12compute_tilePiS_S_iii,@object # @_Z12compute_tilePiS_S_iii .section .rodata,"a",@progbits .globl _Z12compute_tilePiS_S_iii .p2align 3, 0x0 _Z12compute_tilePiS_S_iii: .quad _Z27__device_stub__compute_tilePiS_S_iii .size _Z12compute_tilePiS_S_iii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)" .size .L.str, 93 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "CPU time was %f seconds\n" .size .L.str.1, 25 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12compute_tilePiS_S_iii" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__compute_tilePiS_S_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12compute_tilePiS_S_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12compute_tilePiS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ MOV R4, c[0x0][0x180] ; /* 0x0000600000047a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff147224 */ /* 0x000fe200078e00ff */ /*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */ /* 0x000fc60003f06270 */ /*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002600 */ /*0080*/ S2R R13, SR_TID.Y ; /* 0x00000000000d7919 */ /* 0x000ea20000002200 */ /*0090*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fe400078e0200 */ /*00a0*/ IMAD R2, R2, c[0x0][0x4], RZ ; /* 0x0000010002027a24 */ /* 0x002fe400078e02ff */ /*00b0*/ IMAD R3, R13, c[0x0][0x180], R3 ; /* 0x000060000d037a24 */ /* 0x004fe200078e0203 */ /*00c0*/ @!P0 BRA 0xe20 ; /* 0x00000d5000008947 */ /* 0x000fea0003800000 */ /*00d0*/ MOV R12, c[0x0][0x0] ; /* 0x00000000000c7a02 */ /* 0x000fe20000000f00 */ /*00e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */ /* 0x000fe200000001ff */ /*00f0*/ SHF.L.U32 R5, R13, 0x7, RZ ; /* 0x000000070d057819 */ /* 0x000fe200000006ff */ /*0100*/ IMAD.IADD R13, R2, 0x1, R13 ; /* 0x00000001020d7824 */ /* 0x000fe200078e020d */ /*0110*/ IADD3 R4, R12.reuse, -0x1, RZ ; /* 0xffffffff0c047810 */ /* 0x040fe20007ffe0ff */ /*0120*/ IMAD.MOV.U32 R7, RZ, RZ, R3 ; /* 0x000000ffff077224 */ /* 0x000fe200078e0003 */ /*0130*/ LOP3.LUT R12, R12, 0x3, RZ, 0xc0, !PT ; /* 0x000000030c0c7812 */ /* 0x000fe200078ec0ff */ /*0140*/ IMAD R13, R13, c[0x0][0x17c], R0 ; /* 0x00005f000d0d7a24 */ /* 0x000fe200078e0200 */ /*0150*/ 
ISETP.GE.U32.AND P2, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fc40003f46070 */ /*0160*/ LEA R4, R0.reuse, 0x1100, 0x2 ; /* 0x0000110000047811 */ /* 0x040fe400078e10ff */ /*0170*/ MOV R20, RZ ; /* 0x000000ff00147202 */ /* 0x000fe40000000f00 */ /*0180*/ LEA R14, R0, R5, 0x2 ; /* 0x00000005000e7211 */ /* 0x000fe400078e10ff */ /*0190*/ IADD3 R15, R5, 0x8, RZ ; /* 0x00000008050f7810 */ /* 0x000fe40007ffe0ff */ /*01a0*/ IADD3 R16, -R12, c[0x0][0x0], RZ ; /* 0x000000000c107a10 */ /* 0x000fe40007ffe1ff */ /*01b0*/ MOV R8, 0x4 ; /* 0x0000000400087802 */ /* 0x000fca0000000f00 */ /*01c0*/ IMAD.WIDE R10, R13, R8, c[0x0][0x160] ; /* 0x000058000d0a7625 */ /* 0x000fc800078e0208 */ /*01d0*/ IMAD.WIDE R8, R7, R8, c[0x0][0x168] ; /* 0x00005a0007087625 */ /* 0x000fe400078e0208 */ /*01e0*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */ /* 0x000ee2000c1e1900 */ /*0200*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe40003f05270 */ /*0210*/ IADD3 R6, R6, c[0x0][0x0], RZ ; /* 0x0000000006067a10 */ /* 0x000fc80007ffe0ff */ /*0220*/ ISETP.GE.AND P1, PT, R6, c[0x0][0x180], PT ; /* 0x0000600006007a0c */ /* 0x000fe20003f26270 */ /*0230*/ I2F R17, R10 ; /* 0x0000000a00117306 */ /* 0x004e300000201400 */ /*0240*/ I2F R19, R8 ; /* 0x0000000800137306 */ /* 0x00ae620000201400 */ /*0250*/ STS [R14], R17 ; /* 0x000000110e007388 */ /* 0x0011e80000000800 */ /*0260*/ STS [R14+0x1000], R19 ; /* 0x001000130e007388 */ /* 0x0021e80000000800 */ /*0270*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0280*/ @!P0 BRA 0xdd0 ; /* 0x00000b4000008947 */ /* 0x000fea0003800000 */ /*0290*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x001fe20008000000 */ /*02a0*/ @!P2 BRA 0xc80 ; /* 0x000009d00000a947 */ /* 0x000fea0003800000 */ /*02b0*/ ISETP.GT.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */ /* 0x000fe20003f04270 */ /*02c0*/ UMOV UR4, URZ ; /* 
0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*02d0*/ IMAD.MOV.U32 R19, RZ, RZ, R15 ; /* 0x000000ffff137224 */ /* 0x000fe200078e000f */ /*02e0*/ MOV R18, R4 ; /* 0x0000000400127202 */ /* 0x000fc40000000f00 */ /*02f0*/ MOV R17, R16 ; /* 0x0000001000117202 */ /* 0x000fd00000000f00 */ /*0300*/ @!P0 BRA 0xb00 ; /* 0x000007f000008947 */ /* 0x000fea0003800000 */ /*0310*/ ISETP.GT.AND P3, PT, R17, 0xc, PT ; /* 0x0000000c1100780c */ /* 0x000fe40003f64270 */ /*0320*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0330*/ @!P3 BRA 0x830 ; /* 0x000004f00000b947 */ /* 0x000fea0003800000 */ /*0340*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0350*/ LDS R9, [R18+-0x100] ; /* 0xffff000012097984 */ /* 0x000fe20000000800 */ /*0360*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0370*/ IADD3 R17, R17, -0x10, RZ ; /* 0xfffffff011117810 */ /* 0x000fe20007ffe0ff */ /*0380*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */ /* 0x000fe2000fffe03f */ /*0390*/ LDS.64 R10, [R19+-0x8] ; /* 0xfffff800130a7984 */ /* 0x000e240000000a00 */ /*03a0*/ ISETP.GT.AND P3, PT, R17, 0xc, PT ; /* 0x0000000c1100780c */ /* 0x000fe40003f64270 */ /*03b0*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*03c0*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*03d0*/ FFMA R10, R9, R10, R20 ; /* 0x0000000a090a7223 */ /* 0x001fc60000000014 */ /*03e0*/ LDS.64 R8, [R19] ; /* 0x0000000013087984 */ /* 0x000e260000000a00 */ /*03f0*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000eb0000020f100 */ /*0400*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x004e640000201400 */ /*0410*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x002fc40000000015 */ /*0420*/ LDS R22, [R18+0x80] ; /* 0x0000800012167984 */ /* 0x000e680000000800 */ /*0430*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000eb0000020f100 */ /*0440*/ I2F R11, R21 ; /* 
0x00000015000b7306 */ /* 0x004e240000201400 */ /*0450*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x001fc4000000000b */ /*0460*/ LDS R23, [R18+0x100] ; /* 0x0001000012177984 */ /* 0x000fe80000000800 */ /*0470*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f100 */ /*0480*/ LDS.64 R10, [R19+0x8] ; /* 0x00000800130a7984 */ /* 0x000eae0000000a00 */ /*0490*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*04a0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fc40000000014 */ /*04b0*/ LDS R22, [R18+0x180] ; /* 0x0001800012167984 */ /* 0x000e280000000800 */ /*04c0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*04d0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*04e0*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*04f0*/ LDS R23, [R18+0x200] ; /* 0x0002000012177984 */ /* 0x000fe80000000800 */ /*0500*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0510*/ LDS.64 R8, [R19+0x10] ; /* 0x0000100013087984 */ /* 0x000eae0000000a00 */ /*0520*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0530*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x001fc40000000015 */ /*0540*/ LDS R22, [R18+0x280] ; /* 0x0002800012167984 */ /* 0x000e280000000800 */ /*0550*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000e70000020f100 */ /*0560*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x002ea40000201400 */ /*0570*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x004fc4000000000b */ /*0580*/ LDS R23, [R18+0x300] ; /* 0x0003000012177984 */ /* 0x000fe80000000800 */ /*0590*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e62000020f100 */ /*05a0*/ LDS.64 R10, [R19+0x18] ; /* 0x00001800130a7984 */ /* 0x000eae0000000a00 */ /*05b0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x002e240000201400 */ /*05c0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x001fc40000000014 */ 
/*05d0*/ LDS R22, [R18+0x380] ; /* 0x0003800012167984 */ /* 0x000e280000000800 */ /*05e0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*05f0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*0600*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*0610*/ LDS R23, [R18+0x400] ; /* 0x0004000012177984 */ /* 0x000fe80000000800 */ /*0620*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0630*/ LDS.64 R8, [R19+0x20] ; /* 0x0000200013087984 */ /* 0x000eae0000000a00 */ /*0640*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0650*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x001fc40000000015 */ /*0660*/ LDS R22, [R18+0x480] ; /* 0x0004800012167984 */ /* 0x000e280000000800 */ /*0670*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000e70000020f100 */ /*0680*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x002ea40000201400 */ /*0690*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x004fc4000000000b */ /*06a0*/ LDS R23, [R18+0x500] ; /* 0x0005000012177984 */ /* 0x000fe80000000800 */ /*06b0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e62000020f100 */ /*06c0*/ LDS.64 R10, [R19+0x28] ; /* 0x00002800130a7984 */ /* 0x000eae0000000a00 */ /*06d0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x002e240000201400 */ /*06e0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x001fc40000000014 */ /*06f0*/ LDS R22, [R18+0x580] ; /* 0x0005800012167984 */ /* 0x000e280000000800 */ /*0700*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*0710*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*0720*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*0730*/ LDS R23, [R18+0x600] ; /* 0x0006000012177984 */ /* 0x000fe80000000800 */ /*0740*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0750*/ LDS.64 R8, [R19+0x30] ; /* 
0x0000300013087984 */ /* 0x0004e40000000a00 */ /*0760*/ IADD3 R19, R19, 0x40, RZ ; /* 0x0000004013137810 */ /* 0x004fca0007ffe0ff */ /*0770*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0780*/ FFMA R11, R22, R11, R21 ; /* 0x0000000b160b7223 */ /* 0x001fe40000000015 */ /*0790*/ LDS R22, [R18+0x680] ; /* 0x0006800012167984 */ /* 0x0000680000000800 */ /*07a0*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f100 */ /*07b0*/ IADD3 R18, R18, 0x800, RZ ; /* 0x0000080012127810 */ /* 0x001fce0007ffe0ff */ /*07c0*/ I2F R21, R11 ; /* 0x0000000b00157306 */ /* 0x004ee40000201400 */ /*07d0*/ FFMA R8, R23, R8, R21 ; /* 0x0000000817087223 */ /* 0x008fcc0000000015 */ /*07e0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e30000020f100 */ /*07f0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*0800*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fcc0000000014 */ /*0810*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e22000020f100 */ /*0820*/ @P3 BRA 0x350 ; /* 0xfffffb2000003947 */ /* 0x000fea000383ffff */ /*0830*/ ISETP.GT.AND P3, PT, R17, 0x4, PT ; /* 0x000000041100780c */ /* 0x000fda0003f64270 */ /*0840*/ @!P3 BRA 0xae0 ; /* 0x000002900000b947 */ /* 0x000fea0003800000 */ /*0850*/ LDS R9, [R18+-0x100] ; /* 0xffff000012097984 */ /* 0x000fe20000000800 */ /*0860*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0e170 */ /*0880*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */ /* 0x000fe2000fffe03f */ /*0890*/ LDS.64 R10, [R19+-0x8] ; /* 0xfffff800130a7984 */ /* 0x000e220000000a00 */ /*08a0*/ IADD3 R17, R17, -0x8, RZ ; /* 0xfffffff811117810 */ /* 0x000fc60007ffe0ff */ /*08b0*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*08c0*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*08d0*/ FFMA R10, R9, R10, R20 ; /* 0x0000000a090a7223 
*/ /* 0x001fc60000000014 */ /*08e0*/ LDS.64 R8, [R19] ; /* 0x0000000013087984 */ /* 0x000e260000000a00 */ /*08f0*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000eb0000020f100 */ /*0900*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x004e640000201400 */ /*0910*/ FFMA R21, R22, R11, R21 ; /* 0x0000000b16157223 */ /* 0x002fc40000000015 */ /*0920*/ LDS R22, [R18+0x80] ; /* 0x0000800012167984 */ /* 0x000e680000000800 */ /*0930*/ F2I.TRUNC.NTZ R21, R21 ; /* 0x0000001500157305 */ /* 0x000eb0000020f100 */ /*0940*/ I2F R11, R21 ; /* 0x00000015000b7306 */ /* 0x004e240000201400 */ /*0950*/ FFMA R8, R23, R8, R11 ; /* 0x0000000817087223 */ /* 0x001fc4000000000b */ /*0960*/ LDS R23, [R18+0x100] ; /* 0x0001000012177984 */ /* 0x000fe80000000800 */ /*0970*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f100 */ /*0980*/ LDS.64 R10, [R19+0x8] ; /* 0x00000800130a7984 */ /* 0x000eae0000000a00 */ /*0990*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*09a0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fc40000000014 */ /*09b0*/ LDS R22, [R18+0x180] ; /* 0x0001800012167984 */ /* 0x000e280000000800 */ /*09c0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e70000020f100 */ /*09d0*/ I2F R9, R20 ; /* 0x0000001400097306 */ /* 0x002ea40000201400 */ /*09e0*/ FFMA R10, R23, R10, R9 ; /* 0x0000000a170a7223 */ /* 0x004fc40000000009 */ /*09f0*/ LDS R23, [R18+0x200] ; /* 0x0002000012177984 */ /* 0x000fe80000000800 */ /*0a00*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e62000020f100 */ /*0a10*/ LDS.64 R8, [R19+0x10] ; /* 0x0000100013087984 */ /* 0x0004e40000000a00 */ /*0a20*/ IADD3 R19, R19, 0x20, RZ ; /* 0x0000002013137810 */ /* 0x004fca0007ffe0ff */ /*0a30*/ I2F R21, R10 ; /* 0x0000000a00157306 */ /* 0x002e240000201400 */ /*0a40*/ FFMA R11, R22, R11, R21 ; /* 0x0000000b160b7223 */ /* 0x001fe40000000015 */ /*0a50*/ LDS R22, [R18+0x280] ; /* 0x0002800012167984 */ /* 0x0000680000000800 */ /*0a60*/ F2I.TRUNC.NTZ 
R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f100 */ /*0a70*/ IADD3 R18, R18, 0x400, RZ ; /* 0x0000040012127810 */ /* 0x001fce0007ffe0ff */ /*0a80*/ I2F R21, R11 ; /* 0x0000000b00157306 */ /* 0x004ee40000201400 */ /*0a90*/ FFMA R8, R23, R8, R21 ; /* 0x0000000817087223 */ /* 0x008fcc0000000015 */ /*0aa0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e30000020f100 */ /*0ab0*/ I2F R20, R8 ; /* 0x0000000800147306 */ /* 0x001e640000201400 */ /*0ac0*/ FFMA R20, R22, R9, R20 ; /* 0x0000000916147223 */ /* 0x002fcc0000000014 */ /*0ad0*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e24000020f100 */ /*0ae0*/ ISETP.NE.OR P0, PT, R17, RZ, P0 ; /* 0x000000ff1100720c */ /* 0x000fda0000705670 */ /*0af0*/ @!P0 BRA 0xc80 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*0b00*/ LDS R11, [R18+-0x100] ; /* 0xffff0000120b7984 */ /* 0x000fe20000000800 */ /*0b10*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0b20*/ IADD3 R17, R17, -0x4, RZ ; /* 0xfffffffc11117810 */ /* 0x000fe20007ffe0ff */ /*0b30*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fe2000fffe03f */ /*0b40*/ LDS.64 R8, [R19+-0x8] ; /* 0xfffff80013087984 */ /* 0x000e240000000a00 */ /*0b50*/ ISETP.NE.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fe40003f05270 */ /*0b60*/ LDS R22, [R18+-0x80] ; /* 0xffff800012167984 */ /* 0x000e680000000800 */ /*0b70*/ LDS R23, [R18] ; /* 0x0000000012177984 */ /* 0x000fe20000000800 */ /*0b80*/ FFMA R8, R11, R8, R20 ; /* 0x000000080b087223 */ /* 0x001fc60000000014 */ /*0b90*/ LDS.64 R10, [R19] ; /* 0x00000000130a7984 */ /* 0x0000a60000000a00 */ /*0ba0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000ee2000020f100 */ /*0bb0*/ IADD3 R19, R19, 0x10, RZ ; /* 0x0000001013137810 */ /* 0x001fce0007ffe0ff */ /*0bc0*/ I2F R21, R8 ; /* 0x0000000800157306 */ /* 0x008e640000201400 */ /*0bd0*/ FFMA R9, R22, R9, R21 ; /* 0x0000000916097223 */ /* 0x002fe40000000015 */ /*0be0*/ LDS R22, [R18+0x80] ; /* 
0x0000800012167984 */ /* 0x0000680000000800 */ /*0bf0*/ F2I.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */ /* 0x000ee2000020f100 */ /*0c00*/ IADD3 R18, R18, 0x200, RZ ; /* 0x0000020012127810 */ /* 0x001fce0007ffe0ff */ /*0c10*/ I2F R21, R9 ; /* 0x0000000900157306 */ /* 0x008ea40000201400 */ /*0c20*/ FFMA R10, R23, R10, R21 ; /* 0x0000000a170a7223 */ /* 0x004fcc0000000015 */ /*0c30*/ F2I.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000e30000020f100 */ /*0c40*/ I2F R20, R10 ; /* 0x0000000a00147306 */ /* 0x001e640000201400 */ /*0c50*/ FFMA R20, R22, R11, R20 ; /* 0x0000000b16147223 */ /* 0x002fcc0000000014 */ /*0c60*/ F2I.TRUNC.NTZ R20, R20 ; /* 0x0000001400147305 */ /* 0x000e24000020f100 */ /*0c70*/ @P0 BRA 0xb00 ; /* 0xfffffe8000000947 */ /* 0x001fea000383ffff */ /*0c80*/ ISETP.NE.AND P0, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fda0003f05270 */ /*0c90*/ @!P0 BRA 0xdd0 ; /* 0x0000013000008947 */ /* 0x000fea0003800000 */ /*0ca0*/ IMAD.U32 R8, RZ, RZ, UR4 ; /* 0x00000004ff087e24 */ /* 0x000fe2000f8e00ff */ /*0cb0*/ ULEA UR5, UR4, 0x1000, 0x7 ; /* 0x0000100004057891 */ /* 0x000fe2000f8e383f */ /*0cc0*/ I2F R20, R20 ; /* 0x0000001400147306 */ /* 0x001e220000201400 */ /*0cd0*/ ISETP.NE.AND P0, PT, R12, 0x1, PT ; /* 0x000000010c00780c */ /* 0x000fe40003f05270 */ /*0ce0*/ LEA R9, R8, R5, 0x2 ; /* 0x0000000508097211 */ /* 0x000fca00078e10ff */ /*0cf0*/ LDS R17, [R0.X4+UR5] ; /* 0x0000000500117984 */ /* 0x000fe80008004800 */ /*0d00*/ LDS.128 R8, [R9] ; /* 0x0000000009087984 */ /* 0x000e240000000c00 */ /*0d10*/ FFMA R8, R17, R8, R20 ; /* 0x0000000811087223 */ /* 0x001fc80000000014 */ /*0d20*/ F2I.TRUNC.NTZ R20, R8 ; /* 0x0000000800147305 */ /* 0x000062000020f100 */ /*0d30*/ @!P0 BRA 0xdd0 ; /* 0x0000009000008947 */ /* 0x000fea0003800000 */ /*0d40*/ LDS R8, [R0.X4+UR5+0x80] ; /* 0x0000800500087984 */ /* 0x001e220008004800 */ /*0d50*/ ISETP.NE.AND P0, PT, R12, 0x2, PT ; /* 0x000000020c00780c */ /* 0x000fe20003f05270 */ /*0d60*/ I2F R20, R20 ; /* 0x0000001400147306 
*/ /* 0x002e180000201400 */ /*0d70*/ @P0 LDS R11, [R0.X4+UR5+0x100] ; /* 0x00010005000b0984 */ /* 0x000e620008004800 */ /*0d80*/ FFMA R8, R8, R9, R20 ; /* 0x0000000908087223 */ /* 0x001fc80000000014 */ /*0d90*/ F2I.TRUNC.NTZ R20, R8 ; /* 0x0000000800147305 */ /* 0x000e30000020f100 */ /*0da0*/ @P0 I2F R9, R20 ; /* 0x0000001400090306 */ /* 0x001e640000201400 */ /*0db0*/ @P0 FFMA R10, R11, R10, R9 ; /* 0x0000000a0b0a0223 */ /* 0x002fcc0000000009 */ /*0dc0*/ @P0 F2I.TRUNC.NTZ R20, R10 ; /* 0x0000000a00140305 */ /* 0x000064000020f100 */ /*0dd0*/ MOV R8, c[0x0][0x0] ; /* 0x0000000000087a02 */ /* 0x001fe20000000f00 */ /*0de0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0df0*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */ /* 0x000fc60007ffe0ff */ /*0e00*/ IMAD R7, R8, c[0x0][0x180], R7 ; /* 0x0000600008077a24 */ /* 0x000fe400078e0207 */ /*0e10*/ @!P1 BRA 0x1b0 ; /* 0xfffff39000009947 */ /* 0x000fea000383ffff */ /*0e20*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0e30*/ IMAD R2, R2, c[0x0][0x180], R3 ; /* 0x0000600002027a24 */ /* 0x000fd200078e0203 */ /*0e40*/ IMAD.WIDE R2, R2, R5, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0205 */ /*0e50*/ STG.E [R2.64], R20 ; /* 0x0000001402007986 */ /* 0x002fe2000c101906 */ /*0e60*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0e70*/ BRA 0xe70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0e80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0e90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ea0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0eb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ec0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ed0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ee0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ef0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12compute_tilePiS_S_iii .globl _Z12compute_tilePiS_S_iii .p2align 8 .type _Z12compute_tilePiS_S_iii,@function _Z12compute_tilePiS_S_iii: s_clause 0x1 s_load_b32 s3, s[0:1], 0x20 s_load_b32 s2, s[0:1], 0x34 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) v_mul_lo_u32 v5, v2, s3 s_and_b32 s9, s2, 0xffff s_lshr_b32 s8, s2, 16 s_cmp_lt_i32 s3, 1 s_mul_i32 s14, s14, s9 s_cbranch_scc1 .LBB0_5 s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[6:7], null, s15, s8, v[2:3] v_lshlrev_b32_e32 v9, 2, v0 v_add3_u32 v1, v5, v0, s14 s_cmp_lg_u32 s9, 0 v_lshlrev_b32_e32 v7, 7, v2 s_cselect_b32 s10, -1, 0 s_mul_i32 s11, s9, s3 v_cndmask_b32_e64 v2, 0, 1, s10 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[3:4], null, v6, s2, v[0:1] v_mov_b32_e32 v6, 0 v_or_b32_e32 v8, 0x1000, v9 v_add_nc_u32_e32 v9, v7, v9 v_cmp_ne_u32_e64 s2, 1, v2 s_delay_alu instid0(VALU_DEP_3) v_add_nc_u32_e32 v10, v8, v7 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: v_add_nc_u32_e32 v3, s9, v3 v_add_nc_u32_e32 v1, s11, v1 s_add_i32 s10, s10, s9 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s10, s3 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_6 .LBB0_3: v_ashrrev_i32_e32 v4, 31, v3 v_ashrrev_i32_e32 v2, 31, v1 s_mov_b32 s12, s9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[11:12], 2, v[3:4] v_lshlrev_b64 v[13:14], 2, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v11, vcc_lo, s4, v11 v_add_co_ci_u32_e32 v12, vcc_lo, s5, v12, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v13, vcc_lo, s6, v13 v_add_co_ci_u32_e32 v14, vcc_lo, s7, v14, vcc_lo s_and_b32 vcc_lo, exec_lo, s2 global_load_b32 v2, v[11:12], off global_load_b32 v4, v[13:14], off s_waitcnt vmcnt(1) v_cvt_f32_i32_e32 v11, v2 s_waitcnt 
vmcnt(0) v_cvt_f32_i32_e32 v12, v4 v_mov_b32_e32 v2, v8 v_mov_b32_e32 v4, v7 ds_store_b32 v9, v11 ds_store_b32 v10, v12 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_vccnz .LBB0_2 .LBB0_4: ds_load_b32 v11, v4 ds_load_b32 v12, v2 v_cvt_f32_i32_e32 v6, v6 v_add_nc_u32_e32 v4, 4, v4 v_add_nc_u32_e32 v2, 0x80, v2 s_add_i32 s12, s12, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s12, 0 s_waitcnt lgkmcnt(0) v_fmac_f32_e32 v6, v11, v12 v_cvt_i32_f32_e32 v6, v6 s_cbranch_scc0 .LBB0_4 s_branch .LBB0_2 .LBB0_5: v_mov_b32_e32 v6, 0 .LBB0_6: s_set_inst_prefetch_distance 0x2 v_add_nc_u32_e32 v0, v5, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_mul_i32 s2, s15, s8 s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s2, s2, s3 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_add3_u32 v0, s14, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v6, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12compute_tilePiS_S_iii .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12compute_tilePiS_S_iii, .Lfunc_end0-_Z12compute_tilePiS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12compute_tilePiS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12compute_tilePiS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0006cacd_00000000-6_final_Eric_Sheeder.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "CPU time was %f seconds\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $168, %rsp .cfi_def_cfa_offset 224 movl %edi, %ebx movq %rsi, %rbp movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT cmpl $4, %ebx jne .L28 movq 8(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r13 movl %eax, 20(%rsp) movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r15 movq %rax, 88(%rsp) movl %eax, %ebx movq 24(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r14 movq %rax, 80(%rsp) movl %eax, 16(%rsp) movq %r13, 24(%rsp) movl %r13d, %r12d imull %r15d, %r12d movslq %r12d, %r13 leal 0(,%r12,4), %eax movq %rax, (%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r15 movl %ebx, %eax imull %r14d, %eax movl %eax, 32(%rsp) movslq %eax, %rsi movq %rsi, 40(%rsp) leal 0(,%rax,4), %eax movq %rax, 48(%rsp) movq %rax, %rdi call 
malloc@PLT movq %rax, 8(%rsp) movl 24(%rsp), %eax imull %r14d, %eax movl %eax, 56(%rsp) movslq %eax, %rsi movq %rsi, 72(%rsp) leal 0(,%rax,4), %ebp movq %rbp, 64(%rsp) movq %rbp, %rdi call malloc@PLT movq %rax, %r14 movq %rbp, %rdi call malloc@PLT movq %rax, %rbp testl %r12d, %r12d jle .L5 movq %r15, %r12 leaq (%r15,%r13,4), %r13 .L6: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax addl $1, %eax movl %eax, (%r12) addq $4, %r12 cmpq %r13, %r12 jne .L6 .L5: cmpl $0, 32(%rsp) jle .L7 movq 8(%rsp), %rax movq %rax, %r12 movq 40(%rsp), %rsi leaq (%rax,%rsi,4), %r13 .L8: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax addl $1, %eax movl %eax, (%r12) addq $4, %r12 cmpq %r13, %r12 jne .L8 .L7: cmpl $0, 56(%rsp) jle .L9 movq 72(%rsp), %rdx salq $2, %rdx movl $0, %eax .L10: movl $0, (%r14,%rax) movl $0, 0(%rbp,%rax) addq $4, %rax cmpq %rdx, %rax jne .L10 .L9: leaq 128(%rsp), %rdi movq (%rsp), %rsi call cudaMalloc@PLT leaq 136(%rsp), %rdi movq 48(%rsp), %r12 movq %r12, %rsi call cudaMalloc@PLT leaq 144(%rsp), %rdi movq 64(%rsp), %r13 movq %r13, %rsi call cudaMalloc@PLT movl $1, %ecx movq (%rsp), %rdx movq %r15, %rsi movq 128(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r12, %rdx movq 8(%rsp), %rsi movq 136(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r13, %rdx movq %rbp, %rsi movq 144(%rsp), %rdi call cudaMemcpy@PLT leaq 112(%rsp), %rdi call cudaEventCreate@PLT leaq 120(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 112(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 120(%rsp), %rdi call cudaEventRecord@PLT movq 120(%rsp), %rdi call cudaEventSynchronize@PLT leaq 108(%rsp), %rdi movq 120(%rsp), %rdx movq 112(%rsp), %rsi call cudaEventElapsedTime@PLT call clock@PLT movq %rax, 72(%rsp) 
cmpl $0, 24(%rsp) jle .L11 movq 80(%rsp), %rdi movl %edi, %esi movq 88(%rsp), %rdx movl %edx, 24(%rsp) movslq %edi, %r8 salq $2, %r8 movl $0, %r13d movl $0, %ecx movl $0, %eax leal -1(%rdi), %r12d leal -1(%rdx), %edi movq %rdi, 48(%rsp) leaq 4(%r15), %rdi movq %rdi, 56(%rsp) movq %r15, 32(%rsp) movq %r14, 40(%rsp) movq %rbp, 80(%rsp) jmp .L12 .L28: leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L13: movl (%rax), %edx imull (%rdi), %edx addl %edx, %r9d addq $4, %rax addq %r8, %rdi cmpq %r10, %rax jne .L13 .L15: movl %r9d, (%r14,%r11,4) leaq 1(%r11), %rax addq $4, %rbp cmpq %r12, %r11 je .L24 movq %rax, %r11 .L16: movq %rbp, %rdi movq %r15, %rax movl $0, %r9d testl %ebx, %ebx jg .L13 jmp .L15 .L24: movl (%rsp), %eax .L14: addl $1, %eax addl %esi, %ecx movl 24(%rsp), %edi addl %edi, %r13d movl 20(%rsp), %edi cmpl %edi, %eax je .L26 .L12: cmpl $0, 16(%rsp) jle .L14 movq 8(%rsp), %rbp movslq %r13d, %r9 movq 32(%rsp), %rdi leaq (%rdi,%r9,4), %r15 movq 48(%rsp), %rdi addq %rdi, %r9 movq 56(%rsp), %rdi leaq (%rdi,%r9,4), %r10 movslq %ecx, %r9 movq 40(%rsp), %rdi leaq (%rdi,%r9,4), %r14 movl $0, %r11d movl %eax, (%rsp) jmp .L16 .L26: movq 32(%rsp), %r15 movq 40(%rsp), %r14 movq 80(%rsp), %rbp .L11: call clock@PLT movq 72(%rsp), %rsi subq %rsi, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 movq %xmm0, %rbx movl $2, %ecx movq 64(%rsp), %rdx movq 144(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT call cudaDeviceSynchronize@PLT movq %rbx, %xmm0 leaq .LC2(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq %r15, %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %r14, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq 128(%rsp), %rdi call cudaFree@PLT movq 136(%rsp), %rdi call cudaFree@PLT movq 144(%rsp), %rdi call cudaFree@PLT call cudaDeviceReset@PLT movq 152(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq 
%rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .globl _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii .type _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii, @function _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii: .LFB2082: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L34 .L30: movq 168(%rsp), %rax subq %fs:40, %rax jne .L35 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L34: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z12compute_tilePiS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L30 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii, .-_Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii .globl _Z12compute_tilePiS_S_iii .type _Z12compute_tilePiS_S_iii, @function _Z12compute_tilePiS_S_iii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 
call _Z39__device_stub__Z12compute_tilePiS_S_iiiPiS_S_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z12compute_tilePiS_S_iii, .-_Z12compute_tilePiS_S_iii .section .rodata.str1.1 .LC3: .string "_Z12compute_tilePiS_S_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z12compute_tilePiS_S_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "final_Eric_Sheeder.hip" .globl _Z27__device_stub__compute_tilePiS_S_iii # -- Begin function _Z27__device_stub__compute_tilePiS_S_iii .p2align 4, 0x90 .type _Z27__device_stub__compute_tilePiS_S_iii,@function _Z27__device_stub__compute_tilePiS_S_iii: # @_Z27__device_stub__compute_tilePiS_S_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z12compute_tilePiS_S_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z27__device_stub__compute_tilePiS_S_iii, .Lfunc_end0-_Z27__device_stub__compute_tilePiS_S_iii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $88, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %rbx movl %edi, %ebp xorl %edi, %edi callq time movl %eax, %edi callq srand cmpl $4, %ebp jne .LBB1_10 # 
%bb.1: movq 8(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r13 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r12 movq 24(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r15 movl %r12d, %ebp imull %r13d, %ebp leal (,%rbp,4), %edi movq %rdi, 8(%rsp) # 8-byte Spill callq malloc movq %rax, %rbx imull %r15d, %r12d leal (,%r12,4), %edi movq %rdi, 56(%rsp) # 8-byte Spill callq malloc movq %rax, %r14 imull %r13d, %r15d leal (,%r15,4), %edi movq %rdi, 64(%rsp) # 8-byte Spill callq malloc movq %rax, 72(%rsp) # 8-byte Spill testl %ebp, %ebp jle .LBB1_4 # %bb.2: # %.lr.ph.preheader movl %ebp, %r13d xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_3: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx negl %ecx addl %ecx, %eax incl %eax movl %eax, (%rbx,%rbp,4) incq %rbp cmpq %rbp, %r13 jne .LBB1_3 .LBB1_4: # %.preheader93 testl %r12d, %r12d jle .LBB1_7 # %bb.5: # %.lr.ph96.preheader movl %r12d, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_6: # %.lr.ph96 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx negl %ecx addl %ecx, %eax incl %eax movl %eax, (%r14,%r13,4) incq %r13 cmpq %r13, %r12 jne .LBB1_6 .LBB1_7: # %.preheader92 testl %r15d, %r15d movq 72(%rsp), %r12 # 8-byte Reload jle .LBB1_9 # %bb.8: # %.lr.ph98.preheader movl %r15d, %edx shlq $2, %rdx movq %r12, %rdi xorl %esi, %esi callq memset@PLT .LBB1_9: # %._crit_edge leaq 40(%rsp), %rdi movq 8(%rsp), %r15 # 8-byte Reload movq %r15, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq 56(%rsp), %rbp # 8-byte Reload movq %rbp, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq 64(%rsp), %r13 # 8-byte Reload movq %r13, %rsi callq 
hipMalloc movq 40(%rsp), %rdi movq %rbx, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r14, %rsi movq %rbp, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r12, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy leaq 48(%rsp), %rdi callq hipEventCreate leaq 24(%rsp), %rdi callq hipEventCreate movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq 24(%rsp), %rdx leaq 84(%rsp), %rdi callq hipEventElapsedTime callq clock movq %rax, %r15 callq clock subq %r15, %rax cvtsi2sd %rax, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movsd %xmm0, 8(%rsp) # 8-byte Spill movq 16(%rsp), %rsi movq %r12, %rdi movq %r13, %rdx movl $2, %ecx callq hipMemcpy callq hipDeviceSynchronize movl $.L.str.1, %edi movsd 8(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movb $1, %al callq printf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r12, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree callq hipDeviceReset xorl %eax, %eax addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB1_10: .cfi_def_cfa_offset 144 movl $.L.str, %edi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups 
%xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12compute_tilePiS_S_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z12compute_tilePiS_S_iii,@object # @_Z12compute_tilePiS_S_iii .section .rodata,"a",@progbits .globl _Z12compute_tilePiS_S_iii .p2align 3, 0x0 _Z12compute_tilePiS_S_iii: .quad _Z27__device_stub__compute_tilePiS_S_iii .size _Z12compute_tilePiS_S_iii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Usage: <./executable.exe n m p>, where n, m, and p are the dimensions of A (nxm) and B (mxp)" .size .L.str, 93 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "CPU time was %f seconds\n" .size .L.str.1, 25 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12compute_tilePiS_S_iii" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__compute_tilePiS_S_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12compute_tilePiS_S_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> int main(int argc, char **argv) { printf("Hello World from CPU!\n"); return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> int main(int argc, char **argv) { printf("Hello World from CPU!\n"); return 0; }
.file "tmpxft_000d5242_00000000-6_helloworld.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello World from CPU!\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4: