system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin hconv_fprop_K64_N64.cu extern "C" __global__ void __launch_bounds__(64) hconv_fprop_K64_N64 ( short* param_Rand, short* param_O, const short* param_I, const short* param_F, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[64*8*4 + 8]; int tid = threadIdx.x; share[tid] = 1; *param_O = share[63-tid]; }
#include <hip/hip_runtime.h> /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin hconv_fprop_K64_N64.cu extern "C" __global__ void __launch_bounds__(64) hconv_fprop_K64_N64 ( short* param_Rand, short* param_O, const short* param_I, const short* param_F, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[64*8*4 + 8]; int tid = threadIdx.x; share[tid] = 1; *param_O = share[63-tid]; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin hconv_fprop_K64_N64.cu extern "C" __global__ void __launch_bounds__(64) hconv_fprop_K64_N64 ( short* param_Rand, short* param_O, const short* param_I, const short* param_F, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[64*8*4 + 8]; int tid = threadIdx.x; share[tid] = 1; *param_O = share[63-tid]; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected hconv_fprop_K64_N64 .globl hconv_fprop_K64_N64 .p2align 8 .type hconv_fprop_K64_N64,@function hconv_fprop_K64_N64: v_xor_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v0, 2, v0 v_mov_b32_e32 v2, 1.0 s_load_b64 s[0:1], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_3) v_lshlrev_b32_e32 v1, 2, v1 ds_store_b32 v0, v2 ds_load_b32 v0, v1 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) v_cvt_i32_f32_e32 v0, v0 global_store_b16 v1, v0, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel hconv_fprop_K64_N64 .amdhsa_group_segment_fixed_size 8224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 192 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 2 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size hconv_fprop_K64_N64, .Lfunc_end0-hconv_fprop_K64_N64 .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 
3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 44 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: by_value - .offset: 52 .size: 4 .value_kind: by_value - .offset: 56 .size: 4 .value_kind: by_value - .offset: 60 .size: 4 .value_kind: by_value - .offset: 64 .size: 4 .value_kind: by_value - .offset: 68 .size: 4 .value_kind: by_value - .offset: 72 .size: 4 .value_kind: by_value - .offset: 76 .size: 4 .value_kind: by_value - .offset: 80 .size: 4 .value_kind: by_value - .offset: 84 .size: 4 .value_kind: by_value - .offset: 88 .size: 4 .value_kind: by_value - .offset: 92 .size: 4 .value_kind: by_value - .offset: 96 .size: 4 .value_kind: by_value - .offset: 100 .size: 4 .value_kind: by_value - .offset: 104 .size: 4 .value_kind: by_value - .offset: 108 .size: 4 .value_kind: by_value - .offset: 112 .size: 4 .value_kind: by_value - .offset: 116 .size: 4 .value_kind: by_value - .offset: 120 .size: 4 .value_kind: by_value - .offset: 124 .size: 4 .value_kind: by_value - .offset: 128 .size: 4 .value_kind: by_value - .offset: 132 .size: 4 .value_kind: by_value - .offset: 136 .size: 4 .value_kind: by_value - .offset: 140 .size: 4 .value_kind: by_value - .offset: 144 .size: 4 .value_kind: by_value - 
.offset: 148 .size: 4 .value_kind: by_value - .offset: 152 .size: 4 .value_kind: by_value - .offset: 156 .size: 4 .value_kind: by_value - .offset: 160 .size: 4 .value_kind: by_value - .offset: 164 .size: 4 .value_kind: by_value - .offset: 168 .size: 4 .value_kind: by_value - .offset: 172 .size: 4 .value_kind: by_value - .offset: 176 .size: 4 .value_kind: by_value - .offset: 180 .size: 4 .value_kind: by_value - .offset: 184 .size: 4 .value_kind: by_value - .offset: 188 .size: 4 .value_kind: by_value .group_segment_fixed_size: 8224 .kernarg_segment_align: 8 .kernarg_segment_size: 192 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 64 .name: hconv_fprop_K64_N64 .private_segment_fixed_size: 0 .sgpr_count: 2 .sgpr_spill_count: 0 .symbol: hconv_fprop_K64_N64.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin hconv_fprop_K64_N64.cu extern "C" __global__ void __launch_bounds__(64) hconv_fprop_K64_N64 ( short* param_Rand, short* param_O, const short* param_I, const short* param_F, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[64*8*4 + 8]; int tid = threadIdx.x; share[tid] = 1; *param_O = share[63-tid]; }
.text .file "hconv_fprop_K64_N64.hip" .globl __device_stub__hconv_fprop_K64_N64 # -- Begin function __device_stub__hconv_fprop_K64_N64 .p2align 4, 0x90 .type __device_stub__hconv_fprop_K64_N64,@function __device_stub__hconv_fprop_K64_N64: # @__device_stub__hconv_fprop_K64_N64 .cfi_startproc # %bb.0: subq $456, %rsp # imm = 0x1C8 .cfi_def_cfa_offset 464 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movss %xmm0, 12(%rsp) movl %r8d, 8(%rsp) movl %r9d, 4(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 4(%rsp), %rax movq %rax, 144(%rsp) leaq 464(%rsp), %rax movq %rax, 152(%rsp) leaq 472(%rsp), %rax movq %rax, 160(%rsp) leaq 480(%rsp), %rax movq %rax, 168(%rsp) leaq 488(%rsp), %rax movq %rax, 176(%rsp) leaq 496(%rsp), %rax movq %rax, 184(%rsp) leaq 504(%rsp), %rax movq %rax, 192(%rsp) leaq 512(%rsp), %rax movq %rax, 200(%rsp) leaq 520(%rsp), %rax movq %rax, 208(%rsp) leaq 528(%rsp), %rax movq %rax, 216(%rsp) leaq 536(%rsp), %rax movq %rax, 224(%rsp) leaq 544(%rsp), %rax movq %rax, 232(%rsp) leaq 552(%rsp), %rax movq %rax, 240(%rsp) leaq 560(%rsp), %rax movq %rax, 248(%rsp) leaq 568(%rsp), %rax movq %rax, 256(%rsp) leaq 576(%rsp), %rax movq %rax, 264(%rsp) leaq 584(%rsp), %rax movq %rax, 272(%rsp) leaq 592(%rsp), %rax movq %rax, 280(%rsp) leaq 600(%rsp), %rax movq %rax, 288(%rsp) leaq 608(%rsp), %rax movq %rax, 296(%rsp) leaq 616(%rsp), %rax movq %rax, 304(%rsp) leaq 624(%rsp), %rax movq %rax, 312(%rsp) leaq 632(%rsp), %rax movq %rax, 320(%rsp) leaq 640(%rsp), %rax movq %rax, 328(%rsp) leaq 648(%rsp), %rax movq %rax, 336(%rsp) leaq 656(%rsp), %rax movq %rax, 344(%rsp) leaq 664(%rsp), %rax movq %rax, 352(%rsp) leaq 672(%rsp), %rax movq %rax, 360(%rsp) leaq 680(%rsp), %rax movq %rax, 368(%rsp) leaq 688(%rsp), %rax movq %rax, 376(%rsp) 
leaq 696(%rsp), %rax movq %rax, 384(%rsp) leaq 704(%rsp), %rax movq %rax, 392(%rsp) leaq 712(%rsp), %rax movq %rax, 400(%rsp) leaq 720(%rsp), %rax movq %rax, 408(%rsp) leaq 728(%rsp), %rax movq %rax, 416(%rsp) leaq 736(%rsp), %rax movq %rax, 424(%rsp) leaq 744(%rsp), %rax movq %rax, 432(%rsp) leaq 752(%rsp), %rax movq %rax, 440(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $hconv_fprop_K64_N64, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $472, %rsp # imm = 0x1D8 .cfi_adjust_cfa_offset -472 retq .Lfunc_end0: .size __device_stub__hconv_fprop_K64_N64, .Lfunc_end0-__device_stub__hconv_fprop_K64_N64 .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $hconv_fprop_K64_N64, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, 
%rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type hconv_fprop_K64_N64,@object # @hconv_fprop_K64_N64 .section .rodata,"a",@progbits .globl hconv_fprop_K64_N64 .p2align 3, 0x0 hconv_fprop_K64_N64: .quad __device_stub__hconv_fprop_K64_N64 .size hconv_fprop_K64_N64, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "hconv_fprop_K64_N64" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__hconv_fprop_K64_N64 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym hconv_fprop_K64_N64 .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : hconv_fprop_K64_N64 .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff077424 */ /* 0x000fe200078e00ff */ /*0030*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe20000000f00 */ /*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0050*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */ /* 0x000fe40000000f00 */ /*0060*/ IADD3 R0, -R4, 0x3f, RZ ; /* 0x0000003f04007810 */ /* 0x001fe20007ffe1ff */ /*0070*/ STS [R4.X4], R7 ; /* 0x0000000704007388 */ /* 0x000fea0000004800 */ /*0080*/ LDS R0, [R0.X4] ; /* 0x0000000000007984 */ /* 0x000e240000004800 */ /*0090*/ F2I.TRUNC.NTZ R5, R0 ; /* 0x0000000000057305 */ /* 0x001e24000020f100 */ /*00a0*/ STG.E.U16 [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101504 */ /*00b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected hconv_fprop_K64_N64 .globl hconv_fprop_K64_N64 .p2align 8 .type hconv_fprop_K64_N64,@function hconv_fprop_K64_N64: v_xor_b32_e32 v1, 63, v0 v_lshlrev_b32_e32 v0, 2, v0 v_mov_b32_e32 v2, 1.0 s_load_b64 s[0:1], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_3) v_lshlrev_b32_e32 v1, 2, v1 ds_store_b32 v0, v2 ds_load_b32 v0, v1 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) v_cvt_i32_f32_e32 v0, v0 global_store_b16 v1, v0, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel hconv_fprop_K64_N64 .amdhsa_group_segment_fixed_size 8224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 192 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 2 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size hconv_fprop_K64_N64, .Lfunc_end0-hconv_fprop_K64_N64 .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 
3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 44 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: by_value - .offset: 52 .size: 4 .value_kind: by_value - .offset: 56 .size: 4 .value_kind: by_value - .offset: 60 .size: 4 .value_kind: by_value - .offset: 64 .size: 4 .value_kind: by_value - .offset: 68 .size: 4 .value_kind: by_value - .offset: 72 .size: 4 .value_kind: by_value - .offset: 76 .size: 4 .value_kind: by_value - .offset: 80 .size: 4 .value_kind: by_value - .offset: 84 .size: 4 .value_kind: by_value - .offset: 88 .size: 4 .value_kind: by_value - .offset: 92 .size: 4 .value_kind: by_value - .offset: 96 .size: 4 .value_kind: by_value - .offset: 100 .size: 4 .value_kind: by_value - .offset: 104 .size: 4 .value_kind: by_value - .offset: 108 .size: 4 .value_kind: by_value - .offset: 112 .size: 4 .value_kind: by_value - .offset: 116 .size: 4 .value_kind: by_value - .offset: 120 .size: 4 .value_kind: by_value - .offset: 124 .size: 4 .value_kind: by_value - .offset: 128 .size: 4 .value_kind: by_value - .offset: 132 .size: 4 .value_kind: by_value - .offset: 136 .size: 4 .value_kind: by_value - .offset: 140 .size: 4 .value_kind: by_value - .offset: 144 .size: 4 .value_kind: by_value - 
.offset: 148 .size: 4 .value_kind: by_value - .offset: 152 .size: 4 .value_kind: by_value - .offset: 156 .size: 4 .value_kind: by_value - .offset: 160 .size: 4 .value_kind: by_value - .offset: 164 .size: 4 .value_kind: by_value - .offset: 168 .size: 4 .value_kind: by_value - .offset: 172 .size: 4 .value_kind: by_value - .offset: 176 .size: 4 .value_kind: by_value - .offset: 180 .size: 4 .value_kind: by_value - .offset: 184 .size: 4 .value_kind: by_value - .offset: 188 .size: 4 .value_kind: by_value .group_segment_fixed_size: 8224 .kernarg_segment_align: 8 .kernarg_segment_size: 192 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 64 .name: hconv_fprop_K64_N64 .private_segment_fixed_size: 0 .sgpr_count: 2 .sgpr_spill_count: 0 .symbol: hconv_fprop_K64_N64.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0013a9d8_00000000-6_hconv_fprop_K64_N64.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii .type _Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii, @function _Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii: .LFB2051: .cfi_startproc endbr64 subq $488, %rsp .cfi_def_cfa_offset 496 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movss %xmm0, 12(%rsp) movl %r8d, 8(%rsp) movl %r9d, 4(%rsp) movq %fs:40, %rax movq %rax, 472(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 4(%rsp), %rax movq %rax, 160(%rsp) leaq 496(%rsp), %rax movq %rax, 168(%rsp) leaq 504(%rsp), %rax movq %rax, 176(%rsp) leaq 512(%rsp), %rax movq %rax, 184(%rsp) leaq 520(%rsp), %rax movq %rax, 192(%rsp) leaq 528(%rsp), %rax movq %rax, 200(%rsp) leaq 536(%rsp), %rax movq %rax, 208(%rsp) leaq 544(%rsp), %rax movq %rax, 216(%rsp) leaq 552(%rsp), %rax movq %rax, 224(%rsp) leaq 560(%rsp), %rax movq %rax, 232(%rsp) leaq 568(%rsp), %rax movq %rax, 240(%rsp) leaq 576(%rsp), %rax movq %rax, 248(%rsp) leaq 584(%rsp), %rax movq %rax, 256(%rsp) leaq 592(%rsp), %rax movq %rax, 
264(%rsp) leaq 600(%rsp), %rax movq %rax, 272(%rsp) leaq 608(%rsp), %rax movq %rax, 280(%rsp) leaq 616(%rsp), %rax movq %rax, 288(%rsp) leaq 624(%rsp), %rax movq %rax, 296(%rsp) leaq 632(%rsp), %rax movq %rax, 304(%rsp) leaq 640(%rsp), %rax movq %rax, 312(%rsp) leaq 648(%rsp), %rax movq %rax, 320(%rsp) leaq 656(%rsp), %rax movq %rax, 328(%rsp) leaq 664(%rsp), %rax movq %rax, 336(%rsp) leaq 672(%rsp), %rax movq %rax, 344(%rsp) leaq 680(%rsp), %rax movq %rax, 352(%rsp) leaq 688(%rsp), %rax movq %rax, 360(%rsp) leaq 696(%rsp), %rax movq %rax, 368(%rsp) leaq 704(%rsp), %rax movq %rax, 376(%rsp) leaq 712(%rsp), %rax movq %rax, 384(%rsp) leaq 720(%rsp), %rax movq %rax, 392(%rsp) leaq 728(%rsp), %rax movq %rax, 400(%rsp) leaq 736(%rsp), %rax movq %rax, 408(%rsp) leaq 744(%rsp), %rax movq %rax, 416(%rsp) leaq 752(%rsp), %rax movq %rax, 424(%rsp) leaq 760(%rsp), %rax movq %rax, 432(%rsp) leaq 768(%rsp), %rax movq %rax, 440(%rsp) leaq 776(%rsp), %rax movq %rax, 448(%rsp) leaq 784(%rsp), %rax movq %rax, 456(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 472(%rsp), %rax subq %fs:40, %rax jne .L8 addq $488, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 504 pushq 56(%rsp) .cfi_def_cfa_offset 512 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq hconv_fprop_K64_N64(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 496 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii, 
.-_Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii .globl hconv_fprop_K64_N64 .type hconv_fprop_K64_N64, @function hconv_fprop_K64_N64: .LFB2052: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 40 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 48 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 56 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 64 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 72 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 80 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 88 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 96 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 104 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 112 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 120 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 128 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 136 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 144 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 152 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 160 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 168 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 176 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 184 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 192 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 200 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 208 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 216 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 224 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 232 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 240 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 248 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 256 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 264 movl 312(%rsp), %eax pushq 
%rax .cfi_def_cfa_offset 272 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 280 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 288 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 296 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 304 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 312 movl 312(%rsp), %eax pushq %rax .cfi_def_cfa_offset 320 call _Z87__device_stub__Z19hconv_fprop_K64_N64PsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPsS_PKsS1_fiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii addq $312, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size hconv_fprop_K64_N64, .-hconv_fprop_K64_N64 .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "hconv_fprop_K64_N64" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq hconv_fprop_K64_N64(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hconv_fprop_K64_N64.hip" .globl __device_stub__hconv_fprop_K64_N64 # -- Begin function __device_stub__hconv_fprop_K64_N64 .p2align 4, 0x90 .type __device_stub__hconv_fprop_K64_N64,@function __device_stub__hconv_fprop_K64_N64: # @__device_stub__hconv_fprop_K64_N64 .cfi_startproc # %bb.0: subq $456, %rsp # imm = 0x1C8 .cfi_def_cfa_offset 464 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movss %xmm0, 12(%rsp) movl %r8d, 8(%rsp) movl %r9d, 4(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 4(%rsp), %rax movq %rax, 144(%rsp) leaq 464(%rsp), %rax movq %rax, 152(%rsp) leaq 472(%rsp), %rax movq %rax, 160(%rsp) leaq 480(%rsp), %rax movq %rax, 168(%rsp) leaq 488(%rsp), %rax movq %rax, 176(%rsp) leaq 496(%rsp), %rax movq %rax, 184(%rsp) leaq 504(%rsp), %rax movq %rax, 192(%rsp) leaq 512(%rsp), %rax movq %rax, 200(%rsp) leaq 520(%rsp), %rax movq %rax, 208(%rsp) leaq 528(%rsp), %rax movq %rax, 216(%rsp) leaq 536(%rsp), %rax movq %rax, 224(%rsp) leaq 544(%rsp), %rax movq %rax, 232(%rsp) leaq 552(%rsp), %rax movq %rax, 240(%rsp) leaq 560(%rsp), %rax movq %rax, 248(%rsp) leaq 568(%rsp), %rax movq %rax, 256(%rsp) leaq 576(%rsp), %rax movq %rax, 264(%rsp) leaq 584(%rsp), %rax movq %rax, 272(%rsp) leaq 592(%rsp), %rax movq %rax, 280(%rsp) leaq 600(%rsp), %rax movq %rax, 288(%rsp) leaq 608(%rsp), %rax movq %rax, 296(%rsp) leaq 616(%rsp), %rax movq %rax, 304(%rsp) leaq 624(%rsp), %rax movq %rax, 312(%rsp) leaq 632(%rsp), %rax movq %rax, 320(%rsp) leaq 640(%rsp), %rax movq %rax, 328(%rsp) leaq 648(%rsp), %rax movq %rax, 336(%rsp) leaq 656(%rsp), %rax movq %rax, 344(%rsp) leaq 664(%rsp), %rax movq %rax, 352(%rsp) leaq 672(%rsp), %rax movq %rax, 360(%rsp) leaq 680(%rsp), %rax movq %rax, 368(%rsp) leaq 688(%rsp), %rax movq %rax, 376(%rsp) 
leaq 696(%rsp), %rax movq %rax, 384(%rsp) leaq 704(%rsp), %rax movq %rax, 392(%rsp) leaq 712(%rsp), %rax movq %rax, 400(%rsp) leaq 720(%rsp), %rax movq %rax, 408(%rsp) leaq 728(%rsp), %rax movq %rax, 416(%rsp) leaq 736(%rsp), %rax movq %rax, 424(%rsp) leaq 744(%rsp), %rax movq %rax, 432(%rsp) leaq 752(%rsp), %rax movq %rax, 440(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $hconv_fprop_K64_N64, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $472, %rsp # imm = 0x1D8 .cfi_adjust_cfa_offset -472 retq .Lfunc_end0: .size __device_stub__hconv_fprop_K64_N64, .Lfunc_end0-__device_stub__hconv_fprop_K64_N64 .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $hconv_fprop_K64_N64, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, 
%rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type hconv_fprop_K64_N64,@object # @hconv_fprop_K64_N64 .section .rodata,"a",@progbits .globl hconv_fprop_K64_N64 .p2align 3, 0x0 hconv_fprop_K64_N64: .quad __device_stub__hconv_fprop_K64_N64 .size hconv_fprop_K64_N64, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "hconv_fprop_K64_N64" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__hconv_fprop_K64_N64 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym hconv_fprop_K64_N64 .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <iterator> #include <thrust/copy.h> #include <thrust/transform.h> #include <algorithm> #include <vector> #include <thrust/sort.h> int main() { thrust::host_vector<int> host_vec(5); //initiating host vector for(int z = 0; z <host_vec.size();++z) host_vec[z]=z; //Printing std::cout<<"Host vector before"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; thrust::device_vector<int> dev_vec = host_vec; //creating device vector and copying host vec into it thrust::transform(dev_vec.begin(), dev_vec.end(),dev_vec.begin(),thrust::negate<int>()); //transformation on GPU thrust::copy(dev_vec.begin(), dev_vec.end(), host_vec.begin()); //copy back to cpu //Printing std::cout<<"Host vector After"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; }
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform17unary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrIiEEEESF_NS9_14no_stencil_tagENS7_6negateIiEENS9_21always_true_predicateEEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R0, P1, R2.reuse, R7, RZ ; /* 0x0000000702007210 */ /* 0x042fe40007f3e0ff */ /*0060*/ IADD3 R6, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002067a10 */ /* 0x000fc60007f1e1ff */ /*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0603 */ /*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */ /* 0x000fe200007fe5ff */ /*0090*/ IMAD.SHL.U32 R2, R0, 0x4, RZ ; /* 0x0000000400027824 */ /* 0x000fe200078e00ff */ /*00a0*/ ISETP.GT.U32.AND P0, PT, R6, 0x1ff, PT ; /* 0x000001ff0600780c */ /* 0x000fe40003f04070 */ /*00b0*/ SHF.L.U64.HI R0, R0, 0x2, R5 ; /* 0x0000000200007819 */ /* 0x000fe40000010205 */ /*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */ /* 0x000fe40003f04300 */ /*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x040fe40007f3e0ff */ /*00e0*/ IADD3 R2, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002027a10 */ /* 0x000fc40007f5e0ff */ /*00f0*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*0100*/ IADD3.X R3, R0, c[0x0][0x174], RZ, P2, !PT ; /* 
0x00005d0000037a10 */ /* 0x000fca00017fe4ff */ /*0110*/ @P0 BRA 0x230 ; /* 0x0000011000000947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R0, R7, 0x100, RZ ; /* 0x0000010007007810 */ /* 0x000fe20007ffe0ff */ /*0130*/ BSSY B0, 0x1e0 ; /* 0x000000a000007945 */ /* 0x000fe20003800000 */ /*0140*/ ISETP.GT.U32.AND P0, PT, R6.reuse, R7, PT ; /* 0x000000070600720c */ /* 0x040fe40003f04070 */ /*0150*/ SHF.R.S32.HI R7, RZ, 0x1f, R6 ; /* 0x0000001fff077819 */ /* 0x000fe40000011406 */ /*0160*/ ISETP.GT.U32.AND P1, PT, R6, R0, PT ; /* 0x000000000600720c */ /* 0x000fe40003f24070 */ /*0170*/ ISETP.GT.AND.EX P0, PT, R7.reuse, RZ, PT, P0 ; /* 0x000000ff0700720c */ /* 0x040fe40003f04300 */ /*0180*/ ISETP.GT.AND.EX P1, PT, R7, RZ, PT, P1 ; /* 0x000000ff0700720c */ /* 0x000fd60003f24310 */ /*0190*/ @!P0 BRA 0x1d0 ; /* 0x0000003000008947 */ /* 0x000fea0003800000 */ /*01a0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1900 */ /*01b0*/ IMAD.MOV R7, RZ, RZ, -R0 ; /* 0x000000ffff077224 */ /* 0x004fca00078e0a00 */ /*01c0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0001e4000c101904 */ /*01d0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*01e0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*01f0*/ LDG.E R4, [R4.64+0x400] ; /* 0x0004000404047981 */ /* 0x000ea4000c1e1900 */ /*0200*/ IMAD.MOV R7, RZ, RZ, -R4 ; /* 0x000000ffff077224 */ /* 0x005fca00078e0a04 */ /*0210*/ STG.E [R2.64+0x400], R7 ; /* 0x0004000702007986 */ /* 0x000fe2000c101904 */ /*0220*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0230*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1900 */ /*0240*/ IMAD.MOV R7, RZ, RZ, -R0 ; /* 0x000000ffff077224 */ /* 0x004fca00078e0a00 */ /*0250*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0260*/ LDG.E R0, [R4.64+0x400] ; /* 0x0004000404007981 */ /* 0x000ea4000c1e1900 */ /*0270*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */ /* 0x004fca00078e0a00 */ /*0280*/ STG.E 
[R2.64+0x400], R9 ; /* 0x0004000902007986 */ /* 0x000fe2000c101904 */ /*0290*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <iterator> #include <thrust/copy.h> #include <thrust/transform.h> #include <algorithm> #include <vector> #include <thrust/sort.h> int main() { thrust::host_vector<int> host_vec(5); //initiating host vector for(int z = 0; z <host_vec.size();++z) host_vec[z]=z; //Printing std::cout<<"Host vector before"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; thrust::device_vector<int> dev_vec = host_vec; //creating device vector and copying host vec into it thrust::transform(dev_vec.begin(), dev_vec.end(),dev_vec.begin(),thrust::negate<int>()); //transformation on GPU thrust::copy(dev_vec.begin(), dev_vec.end(), host_vec.begin()); //copy back to cpu //Printing std::cout<<"Host vector After"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; }
#include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <iterator> #include <thrust/copy.h> #include <thrust/transform.h> #include <algorithm> #include <vector> #include <thrust/sort.h> int main() { thrust::host_vector<int> host_vec(5); //initiating host vector for(int z = 0; z <host_vec.size();++z) host_vec[z]=z; //Printing std::cout<<"Host vector before"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; thrust::device_vector<int> dev_vec = host_vec; //creating device vector and copying host vec into it thrust::transform(dev_vec.begin(), dev_vec.end(),dev_vec.begin(),thrust::negate<int>()); //transformation on GPU thrust::copy(dev_vec.begin(), dev_vec.end(), host_vec.begin()); //copy back to cpu //Printing std::cout<<"Host vector After"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <iterator> #include <thrust/copy.h> #include <thrust/transform.h> #include <algorithm> #include <vector> #include <thrust/sort.h> int main() { thrust::host_vector<int> host_vec(5); //initiating host vector for(int z = 0; z <host_vec.size();++z) host_vec[z]=z; //Printing std::cout<<"Host vector before"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; thrust::device_vector<int> dev_vec = host_vec; //creating device vector and copying host vec into it thrust::transform(dev_vec.begin(), dev_vec.end(),dev_vec.begin(),thrust::negate<int>()); //transformation on GPU thrust::copy(dev_vec.begin(), dev_vec.end(), host_vec.begin()); //copy back to cpu //Printing std::cout<<"Host vector After"<< std::endl; std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout<<std::endl; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_: s_load_b128 s[4:7], s[0:1], 0x18 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu 
instid0(SALU_CYCLE_1) s_and_saveexec_b32 s5, s4 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_add_co_u32 v0, s0, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo flat_load_b32 v2, v[2:3] s_waitcnt vmcnt(0) lgkmcnt(0) v_sub_nc_u32_e32 v2, 0, v2 flat_store_b32 v[0:1], v2 .LBB0_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 40 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 
.amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .Lfunc_end0: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 24 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 40 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 256 .name: 
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform17unary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrIiEEEESF_NS9_14no_stencil_tagENS7_6negateIiEENS9_21always_true_predicateEEEEEvT0_T1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e620000002100 */ /*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */ /* 0x001fca00078e00ff */ /*0050*/ IADD3 R0, P1, R2.reuse, R7, RZ ; /* 0x0000000702007210 */ /* 0x042fe40007f3e0ff */ /*0060*/ IADD3 R6, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002067a10 */ /* 0x000fc60007f1e1ff */ /*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0603 */ /*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */ /* 0x000fe200007fe5ff */ /*0090*/ IMAD.SHL.U32 R2, R0, 0x4, RZ ; /* 0x0000000400027824 */ /* 0x000fe200078e00ff */ /*00a0*/ ISETP.GT.U32.AND P0, PT, R6, 0x1ff, PT ; /* 0x000001ff0600780c */ /* 0x000fe40003f04070 */ /*00b0*/ SHF.L.U64.HI R0, R0, 0x2, R5 ; /* 0x0000000200007819 */ /* 0x000fe40000010205 */ /*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */ /* 0x000fe40003f04300 */ /*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x040fe40007f3e0ff */ /*00e0*/ IADD3 R2, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002027a10 */ /* 0x000fc40007f5e0ff */ /*00f0*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*0100*/ IADD3.X R3, R0, c[0x0][0x174], RZ, P2, !PT ; /* 
0x00005d0000037a10 */ /* 0x000fca00017fe4ff */ /*0110*/ @P0 BRA 0x230 ; /* 0x0000011000000947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R0, R7, 0x100, RZ ; /* 0x0000010007007810 */ /* 0x000fe20007ffe0ff */ /*0130*/ BSSY B0, 0x1e0 ; /* 0x000000a000007945 */ /* 0x000fe20003800000 */ /*0140*/ ISETP.GT.U32.AND P0, PT, R6.reuse, R7, PT ; /* 0x000000070600720c */ /* 0x040fe40003f04070 */ /*0150*/ SHF.R.S32.HI R7, RZ, 0x1f, R6 ; /* 0x0000001fff077819 */ /* 0x000fe40000011406 */ /*0160*/ ISETP.GT.U32.AND P1, PT, R6, R0, PT ; /* 0x000000000600720c */ /* 0x000fe40003f24070 */ /*0170*/ ISETP.GT.AND.EX P0, PT, R7.reuse, RZ, PT, P0 ; /* 0x000000ff0700720c */ /* 0x040fe40003f04300 */ /*0180*/ ISETP.GT.AND.EX P1, PT, R7, RZ, PT, P1 ; /* 0x000000ff0700720c */ /* 0x000fd60003f24310 */ /*0190*/ @!P0 BRA 0x1d0 ; /* 0x0000003000008947 */ /* 0x000fea0003800000 */ /*01a0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1900 */ /*01b0*/ IMAD.MOV R7, RZ, RZ, -R0 ; /* 0x000000ffff077224 */ /* 0x004fca00078e0a00 */ /*01c0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x0001e4000c101904 */ /*01d0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*01e0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*01f0*/ LDG.E R4, [R4.64+0x400] ; /* 0x0004000404047981 */ /* 0x000ea4000c1e1900 */ /*0200*/ IMAD.MOV R7, RZ, RZ, -R4 ; /* 0x000000ffff077224 */ /* 0x005fca00078e0a04 */ /*0210*/ STG.E [R2.64+0x400], R7 ; /* 0x0004000702007986 */ /* 0x000fe2000c101904 */ /*0220*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0230*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1900 */ /*0240*/ IMAD.MOV R7, RZ, RZ, -R0 ; /* 0x000000ffff077224 */ /* 0x004fca00078e0a00 */ /*0250*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe8000c101904 */ /*0260*/ LDG.E R0, [R4.64+0x400] ; /* 0x0004000404007981 */ /* 0x000ea4000c1e1900 */ /*0270*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */ /* 0x004fca00078e0a00 */ /*0280*/ STG.E 
[R2.64+0x400], R9 ; /* 0x0004000902007986 */ /* 0x000fe2000c101904 */ /*0290*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02a0*/ BRA 0x2a0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_: s_load_b128 s[4:7], s[0:1], 0x18 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu 
instid0(SALU_CYCLE_1) s_and_saveexec_b32 s5, s4 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_add_co_u32 v0, s0, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s3, 0, s0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo flat_load_b32 v2, v[2:3] s_waitcnt vmcnt(0) lgkmcnt(0) v_sub_nc_u32_e32 v2, 0, v2 flat_store_b32 v[0:1], v2 .LBB0_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 40 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 
.amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat .Lfunc_end0: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 24 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 40 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 256 .name: 
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform17unary_transform_fINS_6detail15normal_iteratorINS_10device_ptrIiEEEES9_NS3_14no_stencil_tagENS_6negateIiEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <cuda.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ cudaDeviceSynchronize(); return 0; }
code for sm_80 Function : _Z11helloKernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <cuda.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ cudaDeviceSynchronize(); return 0; }
.file "tmpxft_0000d318_00000000-6_threads_and_blocks.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello from the CPU\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaDeviceSynchronize@PLT movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size main, .-main .globl _Z30__device_stub__Z11helloKernelvv .type _Z30__device_stub__Z11helloKernelvv, @function _Z30__device_stub__Z11helloKernelvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 72(%rsp), %rax subq %fs:40, %rax jne .L10 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z11helloKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z30__device_stub__Z11helloKernelvv, .-_Z30__device_stub__Z11helloKernelvv .globl _Z11helloKernelv .type _Z11helloKernelv, @function _Z11helloKernelv: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z11helloKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z11helloKernelv, .-_Z11helloKernelv .section .rodata.str1.1 .LC1: .string "_Z11helloKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z11helloKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <cuda.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ cudaDeviceSynchronize(); return 0; }
#include <stdio.h> #include <hip/hip_runtime.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ hipDeviceSynchronize(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <hip/hip_runtime.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ hipDeviceSynchronize(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11helloKernelv .globl _Z11helloKernelv .p2align 8 .type _Z11helloKernelv,@function _Z11helloKernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11helloKernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11helloKernelv, .Lfunc_end0-_Z11helloKernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- 
amdhsa.kernels: - .args: [] .group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11helloKernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z11helloKernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <hip/hip_runtime.h> __global__ void helloKernel() { /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ } int main() { printf("Hello from the CPU\n"); /* ----- YOUR CODE HERE ----- */ /* -------------------------- */ hipDeviceSynchronize(); return 0; }
.text .file "threads_and_blocks.hip" .globl _Z26__device_stub__helloKernelv # -- Begin function _Z26__device_stub__helloKernelv .p2align 4, 0x90 .type _Z26__device_stub__helloKernelv,@function _Z26__device_stub__helloKernelv: # @_Z26__device_stub__helloKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z11helloKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z26__device_stub__helloKernelv, .Lfunc_end0-_Z26__device_stub__helloKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movl $.Lstr, %edi callq puts@PLT callq hipDeviceSynchronize xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11helloKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z11helloKernelv,@object # @_Z11helloKernelv .section .rodata,"a",@progbits .globl _Z11helloKernelv .p2align 3, 0x0 _Z11helloKernelv: .quad _Z26__device_stub__helloKernelv .size _Z11helloKernelv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11helloKernelv" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello from the CPU" .size .Lstr, 19 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__helloKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11helloKernelv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z11helloKernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11helloKernelv .globl _Z11helloKernelv .p2align 8 .type _Z11helloKernelv,@function _Z11helloKernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11helloKernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11helloKernelv, .Lfunc_end0-_Z11helloKernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- 
amdhsa.kernels: - .args: [] .group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11helloKernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z11helloKernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0000d318_00000000-6_threads_and_blocks.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello from the CPU\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaDeviceSynchronize@PLT movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size main, .-main .globl _Z30__device_stub__Z11helloKernelvv .type _Z30__device_stub__Z11helloKernelvv, @function _Z30__device_stub__Z11helloKernelvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 72(%rsp), %rax subq %fs:40, %rax jne .L10 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z11helloKernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z30__device_stub__Z11helloKernelvv, .-_Z30__device_stub__Z11helloKernelvv .globl _Z11helloKernelv .type _Z11helloKernelv, @function _Z11helloKernelv: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z11helloKernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z11helloKernelv, .-_Z11helloKernelv .section .rodata.str1.1 .LC1: .string "_Z11helloKernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z11helloKernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "threads_and_blocks.hip" .globl _Z26__device_stub__helloKernelv # -- Begin function _Z26__device_stub__helloKernelv .p2align 4, 0x90 .type _Z26__device_stub__helloKernelv,@function _Z26__device_stub__helloKernelv: # @_Z26__device_stub__helloKernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z11helloKernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z26__device_stub__helloKernelv, .Lfunc_end0-_Z26__device_stub__helloKernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movl $.Lstr, %edi callq puts@PLT callq hipDeviceSynchronize xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11helloKernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z11helloKernelv,@object # @_Z11helloKernelv .section .rodata,"a",@progbits .globl _Z11helloKernelv .p2align 3, 0x0 _Z11helloKernelv: .quad _Z26__device_stub__helloKernelv .size _Z11helloKernelv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11helloKernelv" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello from the CPU" .size .Lstr, 19 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__helloKernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11helloKernelv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx : %d, value : %d \n", tid, input[tid]); } __global__ void unique_gid_calculation(int * input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf("blockIdx.x : %d, threadIdx.x : %d, gid : %d, value : %d \n", blockIdx.x, tid, gid, input[gid]); } //int main() //{ // int array_size = 16; // int array_byte_size = sizeof(int) * array_size; // int h_data[] = {23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99}; // // for (int i = 0; i < array_size; i++) // { // printf("%d ", h_data[i]); // } // printf("\n \n"); // // int * d_data; // cudaMalloc((void**)&d_data, array_byte_size); // cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); // // dim3 block(4); // dim3 grid(4); // // unique_gid_calculation <<< grid, block >> > (d_data); // cudaDeviceSynchronize(); // // cudaDeviceReset(); // return 0; //}
code for sm_80 Function : _Z22unique_gid_calculationPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R13, SR_TID.X ; /* 0x00000000000d7919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */ /* 0x000fe20007ffe0ff */ /*0050*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */ /* 0x000e240000002500 */ /*0060*/ IMAD R14, R12, c[0x0][0x0], R13 ; /* 0x000000000c0e7a24 */ /* 0x001fc800078e020d */ /*0070*/ IMAD.WIDE R2, R14, R3, c[0x0][0x160] ; /* 0x000058000e027625 */ /* 0x000fca00078e0203 */ /*0080*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ea2000c1e1900 */ /*0090*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*00a0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */ /* 0x000fe200078e00ff */ /*00b0*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */ /* 0x000fe200078e00ff */ /*00d0*/ LDC.64 R8, c[0x4][R0] ; /* 0x0100000000087b82 */ /* 0x0000660000000a00 */ /*00e0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fe200000e06ff */ /*00f0*/ STL.128 [R1], R12 ; /* 0x0000000c01007387 */ /* 0x0041e80000100c00 */ /*0100*/ LEPC R2 ; /* 0x000000000002734e */ /* 0x002fc60000000000 */ /*0110*/ MOV R11, 0x180 ; /* 0x00000180000b7802 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R20, 0x100 ; /* 0x0000010000147802 */ /* 0x000fc40000000f00 */ /*0130*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0140*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x001fe40000000f00 */ /*0150*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 
0x0000000b14147210 */ /* 0x000fc8000791e102 */ /*0160*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2503 */ /*0170*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x000fea0003c00000 */ /*0180*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0190*/ BRA 0x190; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z25unique_idx_calc_threadIdxPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */ /* 0x000e220000002100 */ /*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */ /* 0x000fe40007ffe0ff */ /*0050*/ IMAD.WIDE R2, R10, R3, c[0x0][0x160] ; /* 0x000058000a027625 */ /* 0x001fca00078e0203 */ /*0060*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea2000c1e1900 */ /*0070*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0080*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0090*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*00a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*00b0*/ LDC.64 R8, c[0x4][R0] ; /* 0x0100000000087b82 */ /* 0x0000660000000a00 */ /*00c0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fe200000e06ff */ /*00d0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0041e80000100a00 */ /*00e0*/ LEPC R2 ; /* 0x000000000002734e */ /* 0x002fc60000000000 */ /*00f0*/ MOV R11, 0x160 ; /* 0x00000160000b7802 */ /* 0x001fe40000000f00 */ /*0100*/ MOV R20, 0xe0 ; /* 0x000000e000147802 */ /* 0x000fc40000000f00 */ /*0110*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0130*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e102 */ /*0140*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2503 */ /*0150*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 
*/ /* 0x000fea0003c00000 */ /*0160*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0170*/ BRA 0x170; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx : %d, value : %d \n", tid, input[tid]); } __global__ void unique_gid_calculation(int * input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf("blockIdx.x : %d, threadIdx.x : %d, gid : %d, value : %d \n", blockIdx.x, tid, gid, input[gid]); } //int main() //{ // int array_size = 16; // int array_byte_size = sizeof(int) * array_size; // int h_data[] = {23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99}; // // for (int i = 0; i < array_size; i++) // { // printf("%d ", h_data[i]); // } // printf("\n \n"); // // int * d_data; // cudaMalloc((void**)&d_data, array_byte_size); // cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); // // dim3 block(4); // dim3 grid(4); // // unique_gid_calculation <<< grid, block >> > (d_data); // cudaDeviceSynchronize(); // // cudaDeviceReset(); // return 0; //}
.file "tmpxft_0002bb47_00000000-6_9_unique_index_calculation_threadIdx_blockIdx_blockDim.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi .type _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi, @function _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi: .LFB2081: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z25unique_idx_calc_threadIdxPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2081: .size _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi, .-_Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi .globl _Z25unique_idx_calc_threadIdxPi .type _Z25unique_idx_calc_threadIdxPi, @function _Z25unique_idx_calc_threadIdxPi: .LFB2082: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2082: .size _Z25unique_idx_calc_threadIdxPi, .-_Z25unique_idx_calc_threadIdxPi .globl _Z42__device_stub__Z22unique_gid_calculationPiPi .type _Z42__device_stub__Z22unique_gid_calculationPiPi, @function _Z42__device_stub__Z22unique_gid_calculationPiPi: .LFB2083: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 88(%rsp), %rax subq %fs:40, %rax jne .L16 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z22unique_gid_calculationPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z42__device_stub__Z22unique_gid_calculationPiPi, .-_Z42__device_stub__Z22unique_gid_calculationPiPi .globl _Z22unique_gid_calculationPi .type _Z22unique_gid_calculationPi, @function _Z22unique_gid_calculationPi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z22unique_gid_calculationPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z22unique_gid_calculationPi, .-_Z22unique_gid_calculationPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z22unique_gid_calculationPi" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "_Z25unique_idx_calc_threadIdxPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 pushq %rbx 
.cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22unique_gid_calculationPi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z25unique_idx_calc_threadIdxPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx : %d, value : %d \n", tid, input[tid]); } __global__ void unique_gid_calculation(int * input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf("blockIdx.x : %d, threadIdx.x : %d, gid : %d, value : %d \n", blockIdx.x, tid, gid, input[gid]); } //int main() //{ // int array_size = 16; // int array_byte_size = sizeof(int) * array_size; // int h_data[] = {23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99}; // // for (int i = 0; i < array_size; i++) // { // printf("%d ", h_data[i]); // } // printf("\n \n"); // // int * d_data; // cudaMalloc((void**)&d_data, array_byte_size); // cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); // // dim3 block(4); // dim3 grid(4); // // unique_gid_calculation <<< grid, block >> > (d_data); // cudaDeviceSynchronize(); // // cudaDeviceReset(); // return 0; //}
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx : %d, value : %d \n", tid, input[tid]); } __global__ void unique_gid_calculation(int * input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf("blockIdx.x : %d, threadIdx.x : %d, gid : %d, value : %d \n", blockIdx.x, tid, gid, input[gid]); } //int main() //{ // int array_size = 16; // int array_byte_size = sizeof(int) * array_size; // int h_data[] = {23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99}; // // for (int i = 0; i < array_size; i++) // { // printf("%d ", h_data[i]); // } // printf("\n \n"); // // int * d_data; // cudaMalloc((void**)&d_data, array_byte_size); // cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); // // dim3 block(4); // dim3 grid(4); // // unique_gid_calculation <<< grid, block >> > (d_data); // cudaDeviceSynchronize(); // // cudaDeviceReset(); // return 0; //}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx : %d, value : %d \n", tid, input[tid]); } __global__ void unique_gid_calculation(int * input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf("blockIdx.x : %d, threadIdx.x : %d, gid : %d, value : %d \n", blockIdx.x, tid, gid, input[gid]); } //int main() //{ // int array_size = 16; // int array_byte_size = sizeof(int) * array_size; // int h_data[] = {23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99}; // // for (int i = 0; i < array_size; i++) // { // printf("%d ", h_data[i]); // } // printf("\n \n"); // // int * d_data; // cudaMalloc((void**)&d_data, array_byte_size); // cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); // // dim3 block(4); // dim3 grid(4); // // unique_gid_calculation <<< grid, block >> > (d_data); // cudaDeviceSynchronize(); // // cudaDeviceReset(); // return 0; //}
.text .file "9_unique_index_calculation_threadIdx_blockIdx_blockDim.hip" .globl _Z40__device_stub__unique_idx_calc_threadIdxPi # -- Begin function _Z40__device_stub__unique_idx_calc_threadIdxPi .p2align 4, 0x90 .type _Z40__device_stub__unique_idx_calc_threadIdxPi,@function _Z40__device_stub__unique_idx_calc_threadIdxPi: # @_Z40__device_stub__unique_idx_calc_threadIdxPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z25unique_idx_calc_threadIdxPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z40__device_stub__unique_idx_calc_threadIdxPi, .Lfunc_end0-_Z40__device_stub__unique_idx_calc_threadIdxPi .cfi_endproc # -- End function .globl _Z37__device_stub__unique_gid_calculationPi # -- Begin function _Z37__device_stub__unique_gid_calculationPi .p2align 4, 0x90 .type _Z37__device_stub__unique_gid_calculationPi,@function _Z37__device_stub__unique_gid_calculationPi: # @_Z37__device_stub__unique_gid_calculationPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z22unique_gid_calculationPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end1: .size _Z37__device_stub__unique_gid_calculationPi, .Lfunc_end1-_Z37__device_stub__unique_gid_calculationPi .cfi_endproc # -- End 
function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z25unique_idx_calc_threadIdxPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22unique_gid_calculationPi, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z25unique_idx_calc_threadIdxPi,@object # @_Z25unique_idx_calc_threadIdxPi .section .rodata,"a",@progbits .globl _Z25unique_idx_calc_threadIdxPi .p2align 3, 0x0 _Z25unique_idx_calc_threadIdxPi: .quad _Z40__device_stub__unique_idx_calc_threadIdxPi .size _Z25unique_idx_calc_threadIdxPi, 8 .type _Z22unique_gid_calculationPi,@object # @_Z22unique_gid_calculationPi .globl 
_Z22unique_gid_calculationPi .p2align 3, 0x0 _Z22unique_gid_calculationPi: .quad _Z37__device_stub__unique_gid_calculationPi .size _Z22unique_gid_calculationPi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z25unique_idx_calc_threadIdxPi" .size .L__unnamed_1, 32 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z22unique_gid_calculationPi" .size .L__unnamed_2, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z40__device_stub__unique_idx_calc_threadIdxPi .addrsig_sym _Z37__device_stub__unique_gid_calculationPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z25unique_idx_calc_threadIdxPi .addrsig_sym _Z22unique_gid_calculationPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0002bb47_00000000-6_9_unique_index_calculation_threadIdx_blockIdx_blockDim.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2059: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi .type _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi, @function _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi: .LFB2081: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z25unique_idx_calc_threadIdxPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2081: .size _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi, .-_Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi .globl _Z25unique_idx_calc_threadIdxPi .type _Z25unique_idx_calc_threadIdxPi, @function _Z25unique_idx_calc_threadIdxPi: .LFB2082: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z25unique_idx_calc_threadIdxPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2082: .size _Z25unique_idx_calc_threadIdxPi, .-_Z25unique_idx_calc_threadIdxPi .globl _Z42__device_stub__Z22unique_gid_calculationPiPi .type _Z42__device_stub__Z22unique_gid_calculationPiPi, @function _Z42__device_stub__Z22unique_gid_calculationPiPi: .LFB2083: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 88(%rsp), %rax subq %fs:40, %rax jne .L16 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z22unique_gid_calculationPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z42__device_stub__Z22unique_gid_calculationPiPi, .-_Z42__device_stub__Z22unique_gid_calculationPiPi .globl _Z22unique_gid_calculationPi .type _Z22unique_gid_calculationPi, @function _Z22unique_gid_calculationPi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z22unique_gid_calculationPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z22unique_gid_calculationPi, .-_Z22unique_gid_calculationPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z22unique_gid_calculationPi" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "_Z25unique_idx_calc_threadIdxPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 pushq %rbx 
.cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22unique_gid_calculationPi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z25unique_idx_calc_threadIdxPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "9_unique_index_calculation_threadIdx_blockIdx_blockDim.hip" .globl _Z40__device_stub__unique_idx_calc_threadIdxPi # -- Begin function _Z40__device_stub__unique_idx_calc_threadIdxPi .p2align 4, 0x90 .type _Z40__device_stub__unique_idx_calc_threadIdxPi,@function _Z40__device_stub__unique_idx_calc_threadIdxPi: # @_Z40__device_stub__unique_idx_calc_threadIdxPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z25unique_idx_calc_threadIdxPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z40__device_stub__unique_idx_calc_threadIdxPi, .Lfunc_end0-_Z40__device_stub__unique_idx_calc_threadIdxPi .cfi_endproc # -- End function .globl _Z37__device_stub__unique_gid_calculationPi # -- Begin function _Z37__device_stub__unique_gid_calculationPi .p2align 4, 0x90 .type _Z37__device_stub__unique_gid_calculationPi,@function _Z37__device_stub__unique_gid_calculationPi: # @_Z37__device_stub__unique_gid_calculationPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z22unique_gid_calculationPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end1: .size _Z37__device_stub__unique_gid_calculationPi, .Lfunc_end1-_Z37__device_stub__unique_gid_calculationPi .cfi_endproc # -- End 
function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z25unique_idx_calc_threadIdxPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22unique_gid_calculationPi, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z25unique_idx_calc_threadIdxPi,@object # @_Z25unique_idx_calc_threadIdxPi .section .rodata,"a",@progbits .globl _Z25unique_idx_calc_threadIdxPi .p2align 3, 0x0 _Z25unique_idx_calc_threadIdxPi: .quad _Z40__device_stub__unique_idx_calc_threadIdxPi .size _Z25unique_idx_calc_threadIdxPi, 8 .type _Z22unique_gid_calculationPi,@object # @_Z22unique_gid_calculationPi .globl 
_Z22unique_gid_calculationPi .p2align 3, 0x0 _Z22unique_gid_calculationPi: .quad _Z37__device_stub__unique_gid_calculationPi .size _Z22unique_gid_calculationPi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z25unique_idx_calc_threadIdxPi" .size .L__unnamed_1, 32 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z22unique_gid_calculationPi" .size .L__unnamed_2, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z40__device_stub__unique_idx_calc_threadIdxPi .addrsig_sym _Z37__device_stub__unique_gid_calculationPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z25unique_idx_calc_threadIdxPi .addrsig_sym _Z22unique_gid_calculationPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES:. * 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType). 
 * 2) Templated types must be (cufftReal, cufftComplex) or
 *    (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT.
 *    IDCT(DCT(x)) == x.
 */

#include <stdio.h>
#include <cufft.h>

// Useful to have: sqrt(2), used by the orthonormal DCT scaling below.
#define ROOT2 1.4142135623730951f

// Total element count (length * batchSize * groupSize) above which the
// packed-real (R2C) FFT path is used instead of C2C.
// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT stage of the DCT: permutes each length-sized signal (even-indexed
// samples first, then odd-indexed samples in reverse — the classic
// DCT-via-FFT reordering) and optionally applies the per-group affine
// transform A (scale) / Ab (offset).
//
// Launch layout (set by the host wrappers below):
//   element    = blockIdx.x * blockDim.x + threadIdx.x (guarded vs. length)
//   blockIdx.y = group index; blockIdx.z strides batches by gridDim.z.
//
// forward == true reads `in` laid out as [batchID][length]; otherwise as
//   [batchID * groupSize + groupID][length].
// R2C == true writes packed reals (for cufftExecR2C); otherwise writes
//   complex values with zero imaginary part (for cufftExecC2C).
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;   // tail guard: grid may overshoot length

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;
    int index;

    // Even/odd reordering: first half takes even samples in order, second
    // half takes odd samples in reverse order.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }

    // Per-group scale/offset are loop-invariant; load them once.
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];

        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }

        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            // C2C path: promote to a complex value with zero imaginary part.
            // NOTE(review): 0.f is a float literal — confirm precision intent
            // for a double instantiation.
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT stage of the forward DCT-II: multiplies each FFT bin by the
// conjugated twiddle (built via sincospi below), keeps the real part,
// applies the orthonormal scaling, optionally applies the per-group affine
// A/Ab, and stores the real result.
// R2C == true reconstructs the full spectrum from the half-spectrum that
// cufftExecR2C produces (conjugate symmetry for bins above length/2).
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;

    // Loop-invariant per-group scale/offset.
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Upper bins come from conjugate symmetry of the real FFT.
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }

        complexType val2;
        complexType ret;
        // sincospi computes sin/cos of pi*x; negating .y below conjugates the
        // twiddle.  NOTE(review): the argument uses a float literal (2.f) —
        // confirm precision intent for a double instantiation.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;

        // Normalisation: 1/sqrt(N) for bin 0, sqrt(2/N) otherwise
        // (orthonormal DCT-II convention).
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }

        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }

        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final permutation of the inverse DCT: keeps the real part of the FFT
// output, optionally applies the per-group affine A/Ab (used as the "A"
// weights on the backward pass), and undoes the even/odd reordering done by
// DCT_setup (element is written back to sample `index`).
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A,
                           const realType * __restrict__ Ab,
                           const realType * __restrict__ in,
                           realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;
    int index;

    // Inverse of the DCT_setup reordering.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }

    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];

        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }

        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused middle stage: finishes the forward DCT (same math as DCT_final),
// applies the per-group D (scale) / Db (offset), optionally records the
// post-D value into deltaMid, then un-normalises and re-applies the twiddle
// so the result can go straight back through a C2C FFT for the inverse DCT.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D,
                                     const realType * __restrict__ Db,
                                     const realType * __restrict__ in,
                                     realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType dlocal;
    realType dblocal;

    // Loop-invariant per-group scale/offset.
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Reconstruct upper bins via conjugate symmetry.
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }

        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;

        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }

        realType re_in = ret.x;

        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }

        // Stash the post-D value for a later weight-gradient pass.
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }

        // Un-normalisation
        // NOTE(review): this branch uses rsqrtf where the normalisation
        // above used rsqrt — for a double instantiation the two differ in
        // precision; confirm this is intended.
        if (element == 0) {
            re_in *= rsqrtf((realType)length);
        } else {
            re_in *= ROOT2 * rsqrtf((realType)length);
        }

        // Re-apply the twiddle to set up the inverse transform's C2C FFT.
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Accumulates weight gradients for one diagonal layer: adds per-element
// gradients into delta_D and delta_Db, reducing serially over the batch with
// one thread per element.
// NOTE(review): gradOutput is scaled by 1/D[element] before use — presumably
// undoing a multiplication by D applied downstream; verify against callers.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D,
                              const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D,
                              realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    // Shift to this group's views once instead of re-indexing in the loop.
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;

    realType recp_localD = 1.f / D[element];

    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }

    // Accumulate (+=) so gradients can be summed across multiple calls.
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass of one ACDC layer:
//   out = IDCT( D .* DCT( A .* in + Ab ) + Db )
// tmp1/tmp2 are scratch buffers; each must hold
// length * batchSize * groupSize complex values (see NOTES above).
// Returns 0 on success, 1 if length is odd.
// NOTE(review): the cufftExecR2C/cufftExecC2C calls tie this wrapper to the
// single-precision cuFFT entry points; a double instantiation would need the
// D2Z/Z2Z variants — confirm intended instantiations.
template<typename realType, typename complexType>
int acdc_fp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    // The even/odd DCT reordering requires an even length.
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }

    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;                               // one grid row per group
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;  // ceil(length / 128)
    gridDim.z = (batchSize + 1) / 2;                     // kernels stride batches by gridDim.z

    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }

    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);

    return 0;
}

// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.

// Backward (data-gradient) pass: propagates delta_out through D and then A,
// writing the input gradient to delta_in. The gradient captured just after
// the multiplication by D is stored to delta_mid (via DCT_final_IDCT_setup)
// for the subsequent weight-gradient pass.
// Returns 0 on success, 1 if length is odd.
template<typename realType, typename complexType>
int acdc_bp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            realType * __restrict__ delta_in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            const realType * __restrict__ delta_out,
            realType * __restrict__ delta_mid,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp: length must be even (%d passed)\n", length);
        return 1;
    }

    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Backward through CD
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    } else {
        DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    }

    // Backward through CA
    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, NULL, tmp2, delta_in);

    return 0;
}

// Weight-gradient accumulation pass: adds gradients for A/Ab (from delta_in
// and the layer input inputA), recomputes the activation entering D
// (inputD = DCT(A .* inputA + Ab)), then adds gradients for D/Db from
// delta_mid. All delta_* buffers are accumulated into (+=) by updateWeights.
// Returns 0 on success, 1 if length is odd.
template<typename realType, typename complexType>
int acdc_bp_acc(cudaStream_t stream, int length, int batchSize, int groupSize,
                cufftHandle planR2C, cufftHandle planC2C,
                realType * __restrict__ delta_in,
                realType * __restrict__ delta_mid,
                const realType * __restrict__ A,
                const realType * __restrict__ Ab,
                const realType * __restrict__ D,
                const realType * __restrict__ inputA,
                realType * __restrict__ inputD,
                realType * __restrict__ delta_A,
                realType * __restrict__ delta_Ab,
                realType * __restrict__ delta_D,
                realType * __restrict__ delta_Db,
                realType * __restrict__ tmp1,
                realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp_acc length must be even (%d passed)\n", length);
        return 1;
    }

    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;

    // updateWeights reduces over the whole batch per thread: narrow blocks.
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);

    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Forward through AC to calculate input going into D
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    }

    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = 1;
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);

    return 0;
}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES:. * 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType). 
 * 2) Templated types must be (cufftReal, cufftComplex) or
 *    (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT.
 *    IDCT(DCT(x)) == x.
 */

#include <stdio.h>
#include <cufft.h>

// Useful to have: sqrt(2), used by the orthonormal DCT scaling below.
#define ROOT2 1.4142135623730951f

// Total element count (length * batchSize * groupSize) above which the
// packed-real (R2C) FFT path is used instead of C2C.
// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT stage of the DCT: permutes each length-sized signal (even-indexed
// samples first, then odd-indexed samples in reverse — the classic
// DCT-via-FFT reordering) and optionally applies the per-group affine
// transform A (scale) / Ab (offset).
//
// Launch layout (set by the host wrappers below):
//   element    = blockIdx.x * blockDim.x + threadIdx.x (guarded vs. length)
//   blockIdx.y = group index; blockIdx.z strides batches by gridDim.z.
//
// forward == true reads `in` laid out as [batchID][length]; otherwise as
//   [batchID * groupSize + groupID][length].
// R2C == true writes packed reals (for cufftExecR2C); otherwise writes
//   complex values with zero imaginary part (for cufftExecC2C).
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;   // tail guard: grid may overshoot length

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;
    int index;

    // Even/odd reordering: first half takes even samples in order, second
    // half takes odd samples in reverse order.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }

    // Per-group scale/offset are loop-invariant; load them once.
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];

        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }

        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            // C2C path: promote to a complex value with zero imaginary part.
            // NOTE(review): 0.f is a float literal — confirm precision intent
            // for a double instantiation.
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT stage of the forward DCT-II: multiplies each FFT bin by the
// conjugated twiddle (built via sincospi below), keeps the real part,
// applies the orthonormal scaling, optionally applies the per-group affine
// A/Ab, and stores the real result.
// R2C == true reconstructs the full spectrum from the half-spectrum that
// cufftExecR2C produces (conjugate symmetry for bins above length/2).
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;

    // Loop-invariant per-group scale/offset.
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Upper bins come from conjugate symmetry of the real FFT.
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }

        complexType val2;
        complexType ret;
        // sincospi computes sin/cos of pi*x; negating .y below conjugates the
        // twiddle.  NOTE(review): the argument uses a float literal (2.f) —
        // confirm precision intent for a double instantiation.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;

        // Normalisation: 1/sqrt(N) for bin 0, sqrt(2/N) otherwise
        // (orthonormal DCT-II convention).
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }

        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }

        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final permutation of the inverse DCT: keeps the real part of the FFT
// output, optionally applies the per-group affine A/Ab (used as the "A"
// weights on the backward pass), and undoes the even/odd reordering done by
// DCT_setup (element is written back to sample `index`).
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A,
                           const realType * __restrict__ Ab,
                           const realType * __restrict__ in,
                           realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType Alocal;
    realType Ablocal;
    int index;

    // Inverse of the DCT_setup reordering.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }

    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];

        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }

        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused middle stage: finishes the forward DCT (same math as DCT_final),
// applies the per-group D (scale) / Db (offset), optionally records the
// post-D value into deltaMid, then un-normalises and re-applies the twiddle
// so the result can go straight back through a C2C FFT for the inverse DCT.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D,
                                     const realType * __restrict__ Db,
                                     const realType * __restrict__ in,
                                     realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    realType dlocal;
    realType dblocal;

    // Loop-invariant per-group scale/offset.
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }

    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Reconstruct upper bins via conjugate symmetry.
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }

        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;

        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }

        realType re_in = ret.x;

        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }

        // Stash the post-D value for a later weight-gradient pass.
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }

        // Un-normalisation
        // NOTE(review): this branch uses rsqrtf where the normalisation
        // above used rsqrt — for a double instantiation the two differ in
        // precision; confirm this is intended.
        if (element == 0) {
            re_in *= rsqrtf((realType)length);
        } else {
            re_in *= ROOT2 * rsqrtf((realType)length);
        }

        // Re-apply the twiddle to set up the inverse transform's C2C FFT.
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Accumulates weight gradients for one diagonal layer: adds per-element
// gradients into delta_D and delta_Db, reducing serially over the batch with
// one thread per element.
// NOTE(review): gradOutput is scaled by 1/D[element] before use — presumably
// undoing a multiplication by D applied downstream; verify against callers.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D,
                              const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D,
                              realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;

    int groupID = blockIdx.y;

    // Shift to this group's views once instead of re-indexing in the loop.
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;

    realType recp_localD = 1.f / D[element];

    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }

    // Accumulate (+=) so gradients can be summed across multiple calls.
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass of one ACDC layer:
//   out = IDCT( D .* DCT( A .* in + Ab ) + Db )
// tmp1/tmp2 are scratch buffers; each must hold
// length * batchSize * groupSize complex values (see NOTES above).
// Returns 0 on success, 1 if length is odd.
// NOTE(review): the cufftExecR2C/cufftExecC2C calls tie this wrapper to the
// single-precision cuFFT entry points; a double instantiation would need the
// D2Z/Z2Z variants — confirm intended instantiations.
template<typename realType, typename complexType>
int acdc_fp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    // The even/odd DCT reordering requires an even length.
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }

    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;                               // one grid row per group
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;  // ceil(length / 128)
    gridDim.z = (batchSize + 1) / 2;                     // kernels stride batches by gridDim.z

    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }

    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);

    return 0;
}

// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
// Backward pass of the ACDC layer: propagates delta_out (gradient at the top)
// back through D·DCT then A·DCT, producing delta_in (gradient at the bottom).
// delta_mid receives the intermediate gradient after the D stage (consumed by
// acdc_bp_acc). Returns 0 on success, 1 if 'length' is odd.
template<typename realType, typename complexType>
int acdc_bp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            realType * __restrict__ delta_in,
            const realType * __restrict__ A, const realType * __restrict__ Ab,
            const realType * __restrict__ D, const realType * __restrict__ Db,
            const realType * __restrict__ delta_out,
            realType * __restrict__ delta_mid,
            realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp: length must be even (%d passed)\n", length);
        return 1;
    }
    // Serialize all FFT work on the caller's stream.
    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Backward through CD
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        // forward=false: gradient input is already group-major; no A scaling.
        DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        // Db=NULL: bias adds no gradient term here; delta_mid captures the D-stage gradient.
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    } else {
        DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    }
    // Backward through CA
    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    // Multiplying by A on the way out completes the chain rule for the A stage.
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, NULL, tmp2, delta_in);
    return 0;
}

// Gradient accumulation for the ACDC parameters (A, Ab, D, Db).
// Uses delta_in (gradient after acdc_bp) for the A/Ab update, recomputes the
// forward AC stage into inputD, then uses delta_mid for the D/Db update.
// Gradients are accumulated (+=) into delta_A/delta_Ab/delta_D/delta_Db.
template<typename realType, typename complexType>
int acdc_bp_acc(cudaStream_t stream, int length, int batchSize, int groupSize,
                cufftHandle planR2C, cufftHandle planC2C,
                realType * __restrict__ delta_in, realType * __restrict__ delta_mid,
                const realType * __restrict__ A, const realType * __restrict__ Ab,
                const realType * __restrict__ D,
                const realType * __restrict__ inputA, realType * __restrict__ inputD,
                realType * __restrict__ delta_A, realType * __restrict__ delta_Ab,
                realType * __restrict__ delta_D, realType * __restrict__ delta_Db,
                realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp_acc length must be even (%d passed)\n", length);
        return 1;
    }
    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    // updateWeights reduces over the batch per thread, so a smaller block is used.
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    // Accumulate delta_A / delta_Ab from the layer input and delta_in.
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Forward through AC to calculate input going into D
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    }
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = 1;  // reset z: updateWeights does not grid-stride over batch
    // Accumulate delta_D / delta_Db from the recomputed inputD and delta_mid.
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);
    return 0;
}
.file "tmpxft_0019f176_00000000-6_acdc.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2095: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2118: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2118: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES:. * 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType). 
 * 2) Templated types must be (cufftReal, cufftComplex) or (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT. IDCT(DCT(x)) == x.
 */
#include <stdio.h>
#include <cufft.h>

// Useful to have
#define ROOT2 1.4142135623730951f

// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT stage of a type-2 DCT: optionally scales/shifts input by (A, Ab),
// reorders elements (evens first, odds reversed) so a plain FFT yields the DCT,
// and writes either real (R2C path) or zero-imaginary complex (C2C path) data.
// forward=true reads 'in' batch-major (no group offset); forward=false reads it
// group-major. Grid layout: x covers elements, y is the group, z grid-strides
// over the batch.
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A, const realType * __restrict__ Ab,
                          const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // DCT-II reordering: first half takes even indices, second half odd
    // indices in reverse.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }
        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT stage of the type-2 DCT: applies the half-sample phase twiddle and
// orthonormal scaling, then optionally scales/shifts by (A, Ab), writing real
// output. R2C=true reconstructs the full spectrum from the half-spectrum via
// conjugate symmetry.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A, const realType * __restrict__ Ab,
                          const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            // R2C output stores only bins [0, length/2]; mirror with conjugation.
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        // Twiddle: exp(-i*pi*element/(2*length)).
        // NOTE(review): the argument is computed in float (2.f) even when
        // realType is double — possible precision loss for the double
        // instantiation; TODO confirm intended.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final stage of the IDCT: takes the real part of the FFT output, optionally
// scales/shifts by (A, Ab) (used to apply A's gradient on the backward pass),
// and undoes the even/odd DCT reordering while writing out.
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A, const realType * __restrict__ Ab,
                           const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Inverse of the DCT_setup reordering.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused middle stage: finishes the forward DCT (twiddle + normalisation),
// applies the diagonal (D, Db) stage, optionally stashes that intermediate into
// deltaMid (backward pass), then prepares complex input for the IDCT's FFT
// (un-normalise + inverse twiddle).
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D, const realType * __restrict__ Db,
                                     const realType * __restrict__ in, realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType dlocal;
    realType dblocal;
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            // Mirror the half-spectrum with conjugation (see DCT_final).
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        realType re_in = ret.x;
        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }
        // Un-normalisation
        // NOTE(review): uses rsqrtf here but rsqrt (overloaded) above — the
        // float intrinsic loses precision when realType is double; TODO confirm.
        if (element == 0) {
            re_in *= rsqrtf((realType)length);
        } else {
            re_in *= ROOT2 * rsqrtf((realType)length);
        }
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Accumulates gradients for a diagonal weight vector D and its bias Db:
// each thread owns one element and reduces over the batch serially.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D, const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D, realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    // Shift per-group vectors to this group's slice.
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;
    // NOTE(review): no zero guard on D[element]; assumes D never hits zero.
    realType recp_localD = 1.f / D[element];
    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        // 'in' indexed without group offset — presumably shared pre-group input.
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass of the ACDC layer: out = IDCT(D .* DCT(A .* in + Ab) + Db).
// tmp1/tmp2 must hold length*batchSize*groupSize complex values each (see
// header notes). Returns 0 on success, 1 if 'length' is odd.
template<typename realType, typename complexType>
int acdc_fp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A, const realType * __restrict__ Ab,
            const realType * __restrict__ D, const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }
    // Serialize all FFT work on the caller's stream.
    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }
    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);
    return 0;
}
// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
// Backward pass of the ACDC layer: propagates delta_out back through the D and
// A stages, producing delta_in. delta_mid receives the intermediate gradient
// after the D stage (consumed later by acdc_bp_acc). Returns 0 on success,
// 1 if 'length' is odd.
template<typename realType, typename complexType>
int acdc_bp(cudaStream_t stream, int length, int batchSize, int groupSize,
            cufftHandle planR2C, cufftHandle planC2C,
            realType * __restrict__ delta_in,
            const realType * __restrict__ A, const realType * __restrict__ Ab,
            const realType * __restrict__ D, const realType * __restrict__ Db,
            const realType * __restrict__ delta_out,
            realType * __restrict__ delta_mid,
            realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp: length must be even (%d passed)\n", length);
        return 1;
    }
    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Backward through CD
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        // forward=false: gradient input is group-major; no A scaling on entry.
        DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    } else {
        DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    }
    // Backward through CA
    cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, NULL, tmp2, delta_in);
    return 0;
}

// Gradient accumulation for the ACDC parameters (A, Ab, D, Db): updates
// delta_A/delta_Ab from delta_in and the layer input, recomputes the forward
// AC stage into inputD, then updates delta_D/delta_Db from delta_mid.
template<typename realType, typename complexType>
int acdc_bp_acc(cudaStream_t stream, int length, int batchSize, int groupSize,
                cufftHandle planR2C, cufftHandle planC2C,
                realType * __restrict__ delta_in, realType * __restrict__ delta_mid,
                const realType * __restrict__ A, const realType * __restrict__ Ab,
                const realType * __restrict__ D,
                const realType * __restrict__ inputA, realType * __restrict__ inputD,
                realType * __restrict__ delta_A, realType * __restrict__ delta_Ab,
                realType * __restrict__ delta_D, realType * __restrict__ delta_Db,
                realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp_acc length must be even (%d passed)\n", length);
        return 1;
    }
    cufftSetStream(planR2C, stream);
    cufftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    // Smaller block for the batch-serial weight-update kernel.
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Forward through AC to calculate input going into D
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
        DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    }
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = 1;  // updateWeights does not grid-stride over batch
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);
    return 0;
}
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES:. * 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType). 
 * 2) Templated types must be (cufftReal, cufftComplex) or (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT. IDCT(DCT(x)) == x.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft/hipfft.h>

// Useful to have
#define ROOT2 1.4142135623730951f

// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT stage of a type-2 DCT (HIP port): optionally scales/shifts input by
// (A, Ab), reorders elements (evens first, odds reversed) so a plain FFT yields
// the DCT, and writes real (R2C) or zero-imaginary complex (C2C) data.
// forward=true reads 'in' batch-major; forward=false reads it group-major.
// Grid: x covers elements, y is the group, z grid-strides over the batch.
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A, const realType * __restrict__ Ab,
                          const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // DCT-II reordering: first half takes even indices, second half odd
    // indices in reverse.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }
        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT stage of the type-2 DCT: half-sample phase twiddle + orthonormal
// scaling, then optional (A, Ab) scale/shift, writing real output. R2C=true
// reconstructs the full spectrum from the half-spectrum via conjugate symmetry.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A, const realType * __restrict__ Ab,
                          const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            // R2C output stores only bins [0, length/2]; mirror with conjugation.
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        // Twiddle: exp(-i*pi*element/(2*length)).
        // NOTE(review): float (2.f) argument even for double realType — possible
        // precision loss for the double instantiation; TODO confirm intended.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final stage of the IDCT: takes the real part of the FFT output, optionally
// scales/shifts by (A, Ab) (applies A's gradient on the backward pass), and
// undoes the even/odd DCT reordering while writing out.
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A, const realType * __restrict__ Ab,
                           const realType * __restrict__ in, realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Inverse of the DCT_setup reordering.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused middle stage: finishes the forward DCT (twiddle + normalisation),
// applies the diagonal (D, Db) stage, optionally stashes that intermediate into
// deltaMid (backward pass), then prepares complex input for the IDCT's FFT
// (un-normalise + inverse twiddle).
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D, const realType * __restrict__ Db,
                                     const realType * __restrict__ in, realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType dlocal;
    realType dblocal;
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            // Mirror the half-spectrum with conjugation (see DCT_final).
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        realType re_in = ret.x;
        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }
        // Un-normalisation
        // NOTE(review): rsqrtf here vs overloaded rsqrt above — float intrinsic
        // loses precision for the double instantiation; TODO confirm.
        if (element == 0) {
            re_in *= rsqrtf((realType)length);
        } else {
            re_in *= ROOT2 * rsqrtf((realType)length);
        }
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Accumulates gradients for a diagonal weight vector D and its bias Db:
// each thread owns one element and reduces over the batch serially.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D, const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D, realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    // Shift per-group vectors to this group's slice.
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;
    // NOTE(review): no zero guard on D[element]; assumes D never hits zero.
    realType recp_localD = 1.f / D[element];
    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        // 'in' indexed without group offset — presumably shared pre-group input.
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass of the ACDC layer (HIP/hipFFT port):
// out = IDCT(D .* DCT(A .* in + Ab) + Db). tmp1/tmp2 must hold
// length*batchSize*groupSize complex values each (see header notes).
// Returns 0 on success, 1 if 'length' is odd.
template<typename realType, typename complexType>
int acdc_fp(hipStream_t stream, int length, int batchSize, int groupSize,
            hipfftHandle planR2C, hipfftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A, const realType * __restrict__ Ab,
            const realType * __restrict__ D, const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1, realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }
    // Serialize all FFT work on the caller's stream.
    hipfftSetStream(planR2C, stream);
    hipfftSetStream(planC2C, stream);
    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;
    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }
    hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
    IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);
    return 0;
}
// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
template<typename realType, typename complexType> int acdc_bp(hipStream_t stream, int length, int batchSize, int groupSize, hipfftHandle planR2C, hipfftHandle planC2C, realType * __restrict__ delta_in, const realType * __restrict__ A, const realType * __restrict__ Ab, const realType * __restrict__ D, const realType * __restrict__ Db, const realType * __restrict__ delta_out, realType * __restrict__ delta_mid, realType * __restrict__ tmp1, realType * __restrict__ tmp2) { if (length & 1) { printf("acdc_bp: length must be even (%d passed)\n", length); return 1; } hipfftSetStream(planR2C, stream); hipfftSetStream(planC2C, stream); dim3 blockDim; dim3 gridDim; gridDim.y = groupSize; blockDim.x = 128; gridDim.x = (length + blockDim.x - 1) / blockDim.x; gridDim.z = (batchSize + 1) / 2; // Backward through CD // R2C is only faster for longer sequences (launch latency vs bandwidth) if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) { DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2); DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); } else { DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); } // Backward through CA hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, NULL, tmp2, delta_in); return 0; } template<typename realType, typename complexType> int 
acdc_bp_acc(hipStream_t stream, int length, int batchSize, int groupSize, hipfftHandle planR2C, hipfftHandle planC2C, realType * __restrict__ delta_in, realType * __restrict__ delta_mid, const realType * __restrict__ A, const realType * __restrict__ Ab, const realType * __restrict__ D, const realType * __restrict__ inputA, realType * __restrict__ inputD, realType * __restrict__ delta_A, realType * __restrict__ delta_Ab, realType * __restrict__ delta_D, realType * __restrict__ delta_Db, realType * __restrict__ tmp1, realType * __restrict__ tmp2) { if (length & 1) { printf("acdc_bp_acc length must be even (%d passed)\n", length); return 1; } hipfftSetStream(planR2C, stream); hipfftSetStream(planC2C, stream); dim3 blockDim; dim3 gridDim; gridDim.y = groupSize; blockDim.x = 32; gridDim.x = (length + blockDim.x - 1) / blockDim.x; updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab); blockDim.x = 128; gridDim.x = (length + blockDim.x - 1) / blockDim.x; gridDim.z = (batchSize + 1) / 2; // Forward through AC to calculate input going into D // R2C is only faster for longer sequences (launch latency vs bandwidth) if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) { DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, inputA, tmp1); hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2); DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, inputD); } else { DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, inputA, tmp1); hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, inputD); } blockDim.x = 32; gridDim.x = (length + 
blockDim.x - 1) / blockDim.x; gridDim.z = 1; updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws. Users and possessors of this source code
 * are hereby granted a nonexclusive, royalty-free license to use this code
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 *
 * Any use of this source code in individual and commercial software must
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */

/* NOTES:
 * 1) The tmp variables must each have space for
 *    length * batchSize * groupSize * sizeof(complexType).
 * 2) Templated types must be (cufftReal, cufftComplex) or
 *    (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT.
 *    IDCT(DCT(x)) == x.
 */

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft/hipfft.h>

// Useful to have
#define ROOT2 1.4142135623730951f

// This is quite system dependent. Slower systems would benefit from a
// smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT half of the FFT-based type-2 DCT: permutes each length-long
// signal into even-samples-ascending / odd-samples-descending order,
// optionally applying the per-group diagonal scale A and bias Ab on the
// way in.
// Launch: grid.x covers `length`, grid.y = groupSize, grid.z strides
// batches.
//   forward=true : read input laid out as [batch][length];
//   forward=false: read [batch * groupSize + group][length].
//   R2C=true     : write packed reals (for hipfftExecR2C);
//   R2C=false    : write complex values with zero imaginary part.
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Even samples first (ascending), then odd samples (descending).
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }
        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT half of the type-2 DCT: applies the twiddle factor
// exp(-i*pi*element/(2*length)), the orthonormal normalisation, then
// (optionally) the per-group scale A / bias Ab, and stores the real DCT
// coefficients.
// R2C=true: the real-to-complex FFT only stores bins 0..length/2, so the
// upper half is reconstructed via conjugate (Hermitian) symmetry.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Mirror bin with conjugation (Hermitian symmetry).
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        // NOTE(review): the phase argument is computed in float even for
        // the double instantiation -- confirm that precision is acceptable.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final half of the type-3 (inverse) DCT: takes the real part of the FFT
// output and undoes the even/odd permutation performed by DCT_setup,
// optionally applying A / Ab ("A" is used for the backward pass).
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A,
                           const realType * __restrict__ Ab,
                           const realType * __restrict__ in,
                           realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Inverse of the permutation in DCT_setup.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused kernel: finishes the forward DCT (twiddle + normalise), applies the
// diagonal D / bias Db, optionally records the value flowing into D
// (deltaMid, used by the backward pass), then immediately prepares the data
// for the inverse DCT's FFT (un-normalise + inverse twiddle). Fusing the
// two halves saves one full round trip to global memory.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D,
                                     const realType * __restrict__ Db,
                                     const realType * __restrict__ in,
                                     realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType dlocal;
    realType dblocal;
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Mirror bin with conjugation (Hermitian symmetry).
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        realType re_in = ret.x;
        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }
        // Un-normalisation. Fixed: this previously called rsqrtf(), which
        // silently demoted the double instantiation to float precision;
        // rsqrt() matches the normalisation above for both template types.
        if (element == 0) {
            re_in *= rsqrt((realType)length);
        } else {
            re_in *= ROOT2 * rsqrt((realType)length);
        }
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Per-element gradient accumulation for a diagonal layer D:
//   delta_D[e]  += sum_b gradOutput[b][e] * in[b][e] / D[e]
//   delta_Db[e] += sum_b gradOutput[b][e] / D[e]
// Launch: grid.x covers `length`, grid.y = groupSize; each thread loops
// over the whole batch itself, so grid.z must be 1.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D,
                              const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D,
                              realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;
    realType recp_localD = 1.f / D[element];
    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass through one A-C-D-C layer, queued on `stream`:
//   out = IDCT( D * DCT( A * in + Ab ) + Db ), per group.
// Chooses an R2C FFT for large problems and C2C otherwise (launch latency
// vs. bandwidth trade-off). Returns 0 on success, 1 (after printing) when
// length is odd. tmp1/tmp2 must each hold
// length * batchSize * groupSize complexType values.
// NOTE(review): hipfftExecR2C/ExecC2C are the single-precision entry
// points; a double instantiation would need D2Z/Z2Z -- confirm only the
// float specialisation is used.
template<typename realType, typename complexType>
int acdc_fp(hipStream_t stream, int length, int batchSize, int groupSize,
            hipfftHandle planR2C, hipfftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }
    hipfftSetStream(planR2C, stream);
    hipfftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                      HIPFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }
    hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                  HIPFFT_FORWARD);
    IDCT_final<realType, complexType>
        <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);
    return 0;
}

// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.

// Backward pass through one A-C-D-C layer, queued on `stream`.
// Propagates delta_out back through C, D, C and A into delta_in, and stores
// the gradient flowing into the D layer in delta_mid (consumed later by
// acdc_bp_acc). Returns 0 on success, 1 (after printing) when length is
// odd. Ab and Db are accepted for signature symmetry but are not read.
template<typename realType, typename complexType>
int acdc_bp(hipStream_t stream, int length, int batchSize, int groupSize,
            hipfftHandle planR2C, hipfftHandle planC2C,
            realType * __restrict__ delta_in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            const realType * __restrict__ delta_out,
            realType * __restrict__ delta_mid,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_bp: length must be even (%d passed)\n", length);
        return 1;
    }
    hipfftSetStream(planR2C, stream);
    hipfftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Backward through CD
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, false, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    } else {
        DCT_setup<realType, complexType, false, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
        hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                      HIPFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
    }

    // Backward through CA
    hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                  HIPFFT_FORWARD);
    IDCT_final<realType, complexType>
        <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, NULL, tmp2, delta_in);
    return 0;
}

// Gradient accumulation for one A-C-D-C layer, queued on `stream`.
// Step 1: accumulate delta_A/delta_Ab from (gradOutput=delta_in, in=inputA).
// Step 2: recompute the activation entering D (inputD) by re-running the
//         forward A-C half (DCT of A*inputA + Ab).
// Step 3: accumulate delta_D/delta_Db from (gradOutput=delta_mid, in=inputD).
// Returns 0 on success, 1 (after printing) when length is odd.
template<typename realType, typename complexType>
int acdc_bp_acc(hipStream_t stream, int length, int batchSize, int groupSize,
                hipfftHandle planR2C, hipfftHandle planC2C,
                realType * __restrict__ delta_in,
                realType * __restrict__ delta_mid,
                const realType * __restrict__ A,
                const realType * __restrict__ Ab,
                const realType * __restrict__ D,
                const realType * __restrict__ inputA,
                realType * __restrict__ inputD,
                realType * __restrict__ delta_A,
                realType * __restrict__ delta_Ab,
                realType * __restrict__ delta_D,
                realType * __restrict__ delta_Db,
                realType * __restrict__ tmp1,
                realType * __restrict__ tmp2) {
    if (length & 1) {
        // Fixed: message now follows the "name: message" format used by
        // acdc_fp and acdc_bp (the colon was missing).
        printf("acdc_bp_acc: length must be even (%d passed)\n", length);
        return 1;
    }
    hipfftSetStream(planR2C, stream);
    hipfftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;

    // gridDim.z is still the dim3 default (1) here, as updateWeights
    // iterates the batch internally.
    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);

    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Forward through AC to calculate input going into D
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final<realType, complexType, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    } else {
        DCT_setup<realType, complexType, true, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, inputA, tmp1);
        hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                      HIPFFT_FORWARD);
        DCT_final<realType, complexType, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
    }

    blockDim.x = 32;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = 1;  // updateWeights loops over the batch itself

    updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);
    return 0;
}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws. Users and possessors of this source code
 * are hereby granted a nonexclusive, royalty-free license to use this code
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 *
 * Any use of this source code in individual and commercial software must
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */

/* NOTES:
 * 1) The tmp variables must each have space for
 *    length * batchSize * groupSize * sizeof(complexType).
 * 2) Templated types must be (cufftReal, cufftComplex) or
 *    (cufftDoubleReal, cufftDoubleComplex)
 * 3) Length must be even.
 * 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT.
 *    IDCT(DCT(x)) == x.
 */

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft/hipfft.h>

// Useful to have
#define ROOT2 1.4142135623730951f

// This is quite system dependent. Slower systems would benefit from a
// smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)

// Pre-FFT half of the FFT-based type-2 DCT: permutes each length-long
// signal into even-samples-ascending / odd-samples-descending order,
// optionally applying the per-group diagonal scale A and bias Ab on the
// way in.
// Launch: grid.x covers `length`, grid.y = groupSize, grid.z strides
// batches.
//   forward=true : read input laid out as [batch][length];
//   forward=false: read [batch * groupSize + group][length].
//   R2C=true     : write packed reals (for hipfftExecR2C);
//   R2C=false    : write complex values with zero imaginary part.
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Even samples first (ascending), then odd samples (descending).
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        realType val;
        if (forward)
            val = ((realType*)(in))[length * batchID + index];
        else
            val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
        if (A != NULL) {
            val *= Alocal;
            if (Ab != NULL) {
                val += Ablocal;
            }
        }
        if (R2C) {
            ((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
        } else {
            complexType outVal;
            outVal.x = val;
            outVal.y = 0.f;
            ((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
        }
    }
}

// Post-FFT half of the type-2 DCT: applies the twiddle factor
// exp(-i*pi*element/(2*length)), the orthonormal normalisation, then
// (optionally) the per-group scale A / bias Ab, and stores the real DCT
// coefficients.
// R2C=true: the real-to-complex FFT only stores bins 0..length/2, so the
// upper half is reconstructed via conjugate (Hermitian) symmetry.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length, int batchSize, int groupSize,
                          const realType * __restrict__ A,
                          const realType * __restrict__ Ab,
                          const realType * __restrict__ in,
                          realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    if (A != NULL) {
        Alocal = A[groupID * length + element];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Mirror bin with conjugation (Hermitian symmetry).
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        // NOTE(review): the phase argument is computed in float even for
        // the double instantiation -- confirm that precision is acceptable.
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        if (A != NULL) {
            ret.x *= Alocal;
            if (Ab != NULL) {
                ret.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
    }
}

// Final half of the type-3 (inverse) DCT: takes the real part of the FFT
// output and undoes the even/odd permutation performed by DCT_setup,
// optionally applying A / Ab ("A" is used for the backward pass).
template<typename realType, typename complexType>
__global__ void IDCT_final(int length, int batchSize, int groupSize,
                           const realType * __restrict__ A,
                           const realType * __restrict__ Ab,
                           const realType * __restrict__ in,
                           realType * __restrict__ out) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType Alocal;
    realType Ablocal;
    int index;
    // Inverse of the permutation in DCT_setup.
    if (element < length / 2) {
        index = element * 2;
    } else {
        index = length - 2 * (element - length / 2) - 1;
    }
    if (A != NULL) {
        Alocal = A[groupID * length + index];
        if (Ab != NULL) {
            Ablocal = Ab[groupID * length + index];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        // "A" for backward pass
        if (A != NULL) {
            val.x *= Alocal;
            if (Ab != NULL) {
                val.x += Ablocal;
            }
        }
        ((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
    }
}

// Fused kernel: finishes the forward DCT (twiddle + normalise), applies the
// diagonal D / bias Db, optionally records the value flowing into D
// (deltaMid, used by the backward pass), then immediately prepares the data
// for the inverse DCT's FFT (un-normalise + inverse twiddle). Fusing the
// two halves saves one full round trip to global memory.
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length, int batchSize, int groupSize,
                                     const realType * __restrict__ D,
                                     const realType * __restrict__ Db,
                                     const realType * __restrict__ in,
                                     realType * __restrict__ out,
                                     realType * __restrict__ deltaMid) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    realType dlocal;
    realType dblocal;
    if (D != NULL) {
        dlocal = D[groupID * length + element];
        if (Db != NULL) {
            dblocal = Db[groupID * length + element];
        }
    }
    for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
        complexType val;
        if (R2C) {
            if (element <= length / 2) {
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
            } else {
                // Mirror bin with conjugation (Hermitian symmetry).
                val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
                val.y = -val.y;
            }
        } else {
            val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
        }
        complexType val2;
        complexType ret;
        sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
        val2.y = -val2.y;
        ret.x = val.x * val2.x - val.y * val2.y;
        // Normalisation
        if (element == 0) {
            ret.x *= rsqrt((realType)length);
        } else {
            ret.x *= ROOT2 * rsqrt((realType)length);
        }
        realType re_in = ret.x;
        if (D != NULL) {
            re_in *= dlocal;
            if (Db != NULL) {
                re_in += dblocal;
            }
        }
        if (deltaMid) {
            deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
        }
        // Un-normalisation. Fixed: this previously called rsqrtf(), which
        // silently demoted the double instantiation to float precision;
        // rsqrt() matches the normalisation above for both template types.
        if (element == 0) {
            re_in *= rsqrt((realType)length);
        } else {
            re_in *= ROOT2 * rsqrt((realType)length);
        }
        sincospi(element / (2.f * length), &(val2.y), &(val2.x));
        val.x = re_in * val2.x;
        val.y = -re_in * val2.y;
        ((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
    }
}

// Per-element gradient accumulation for a diagonal layer D:
//   delta_D[e]  += sum_b gradOutput[b][e] * in[b][e] / D[e]
//   delta_Db[e] += sum_b gradOutput[b][e] / D[e]
// Launch: grid.x covers `length`, grid.y = groupSize; each thread loops
// over the whole batch itself, so grid.z must be 1.
template<typename realType>
__global__ void updateWeights(int length, int batchSize, int groupSize,
                              const realType * __restrict__ D,
                              const realType * __restrict__ in,
                              const realType * __restrict__ gradOutput,
                              realType * __restrict__ delta_D,
                              realType * __restrict__ delta_Db) {
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= length) return;
    int groupID = blockIdx.y;
    D += length * groupID;
    delta_D += length * groupID;
    delta_Db += length * groupID;
    realType recp_localD = 1.f / D[element];
    realType localDeltaD = 0.f;
    realType localDeltaDb = 0.f;
    for (int batchID = 0; batchID < batchSize; batchID++) {
        realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
        localDeltaD += val * in[length * batchID + element];
        localDeltaDb += val;
    }
    delta_D[element] += localDeltaD;
    delta_Db[element] += localDeltaDb;
}

// Forward pass through one A-C-D-C layer, queued on `stream`:
//   out = IDCT( D * DCT( A * in + Ab ) + Db ), per group.
// Chooses an R2C FFT for large problems and C2C otherwise (launch latency
// vs. bandwidth trade-off). Returns 0 on success, 1 (after printing) when
// length is odd. tmp1/tmp2 must each hold
// length * batchSize * groupSize complexType values.
// NOTE(review): hipfftExecR2C/ExecC2C are the single-precision entry
// points; a double instantiation would need D2Z/Z2Z -- confirm only the
// float specialisation is used.
template<typename realType, typename complexType>
int acdc_fp(hipStream_t stream, int length, int batchSize, int groupSize,
            hipfftHandle planR2C, hipfftHandle planC2C,
            const realType * __restrict__ in,
            const realType * __restrict__ A,
            const realType * __restrict__ Ab,
            const realType * __restrict__ D,
            const realType * __restrict__ Db,
            realType * __restrict__ out,
            realType * __restrict__ tmp1,
            realType * __restrict__ tmp2) {
    if (length & 1) {
        printf("acdc_fp: length must be even (%d passed)\n", length);
        return 1;
    }
    hipfftSetStream(planR2C, stream);
    hipfftSetStream(planC2C, stream);

    dim3 blockDim;
    dim3 gridDim;
    gridDim.y = groupSize;
    blockDim.x = 128;
    gridDim.x = (length + blockDim.x - 1) / blockDim.x;
    gridDim.z = (batchSize + 1) / 2;

    // Two DCTs required. Inverse is handled in the custom setup.
    // R2C is only faster for longer sequences (launch latency vs bandwidth)
    if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
        DCT_setup<realType, complexType, true, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
        DCT_final_IDCT_setup<realType, complexType, true>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    } else {
        DCT_setup<realType, complexType, true, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, A, Ab, in, tmp1);
        hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                      HIPFFT_FORWARD);
        DCT_final_IDCT_setup<realType, complexType, false>
            <<< gridDim, blockDim, 0, stream >>> (
            length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
    }
    hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2,
                  HIPFFT_FORWARD);
    IDCT_final<realType, complexType>
        <<< gridDim, blockDim, 0, stream >>> (
        length, batchSize, groupSize, NULL, NULL, tmp2, out);
    return 0;
}

// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
template<typename realType, typename complexType> int acdc_bp(hipStream_t stream, int length, int batchSize, int groupSize, hipfftHandle planR2C, hipfftHandle planC2C, realType * __restrict__ delta_in, const realType * __restrict__ A, const realType * __restrict__ Ab, const realType * __restrict__ D, const realType * __restrict__ Db, const realType * __restrict__ delta_out, realType * __restrict__ delta_mid, realType * __restrict__ tmp1, realType * __restrict__ tmp2) { if (length & 1) { printf("acdc_bp: length must be even (%d passed)\n", length); return 1; } hipfftSetStream(planR2C, stream); hipfftSetStream(planC2C, stream); dim3 blockDim; dim3 gridDim; gridDim.y = groupSize; blockDim.x = 128; gridDim.x = (length + blockDim.x - 1) / blockDim.x; gridDim.z = (batchSize + 1) / 2; // Backward through CD // R2C is only faster for longer sequences (launch latency vs bandwidth) if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) { DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2); DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); } else { DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); } // Backward through CA hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, NULL, tmp2, delta_in); return 0; } template<typename realType, typename complexType> int 
acdc_bp_acc(hipStream_t stream, int length, int batchSize, int groupSize, hipfftHandle planR2C, hipfftHandle planC2C, realType * __restrict__ delta_in, realType * __restrict__ delta_mid, const realType * __restrict__ A, const realType * __restrict__ Ab, const realType * __restrict__ D, const realType * __restrict__ inputA, realType * __restrict__ inputD, realType * __restrict__ delta_A, realType * __restrict__ delta_Ab, realType * __restrict__ delta_D, realType * __restrict__ delta_Db, realType * __restrict__ tmp1, realType * __restrict__ tmp2) { if (length & 1) { printf("acdc_bp_acc length must be even (%d passed)\n", length); return 1; } hipfftSetStream(planR2C, stream); hipfftSetStream(planC2C, stream); dim3 blockDim; dim3 gridDim; gridDim.y = groupSize; blockDim.x = 32; gridDim.x = (length + blockDim.x - 1) / blockDim.x; updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab); blockDim.x = 128; gridDim.x = (length + blockDim.x - 1) / blockDim.x; gridDim.z = (batchSize + 1) / 2; // Forward through AC to calculate input going into D // R2C is only faster for longer sequences (launch latency vs bandwidth) if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) { DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, inputA, tmp1); hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2); DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, inputD); } else { DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, inputA, tmp1); hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD); DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, inputD); } blockDim.x = 32; gridDim.x = (length + 
blockDim.x - 1) / blockDim.x; gridDim.z = 1; updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db); return 0; }
.text .file "acdc.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0019f176_00000000-6_acdc.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2095: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2118: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2118: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "acdc.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsSteps(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,float* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ //__shared__ float observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } /* float to2d[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2d[2*i] = 0.6 * observerCoord[3*i+2] / observerCoord[3*i]; to2d[2*i+1] = 0.6 * observerCoord[3*i+1] / observerCoord[3*i]; } // ---------------to2dmm---------------------------------------------- double to2dmm[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2dmm[2*i] = 160 * to2d[2*i]; to2dmm[2*i+1] = 160 * to2d[2*i+1]; } // -----------------------spX, spY-------------------------------------- int spX[qPoints]; int spY[qPoints]; for (int i = 0; i < qPoints; i++) { spX[i] = (int) (4.35 * to2dmm[2*i]); spY[i] = (int)(4.35 * to2dmm[2*i+1]); } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = spX[i] + 350; yp[i] = -spY[i] + 350; } */ int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } float NormalObs[3*qPolygons]; 
for (int i = 0; i < qPolygons; i++) { NormalObs[3*i] = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]; NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] *dev_matrixC[6]; NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]; } for (int i = 0; i < qPolygons; i++) { if ( ( NormalObs[3*i] * observerCoord[3*dev_polygons[3*i]] + NormalObs[3*i+1] *observerCoord[3*dev_polygons[3*i]+1] + NormalObs[3*i+2] * observerCoord[3*dev_polygons[3*i]+2] )<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
.file "tmpxft_0003974c_00000000-6_obtainPolygonsSteps.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i .type _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i, @function _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i: .LFB2051: .cfi_startproc endbr64 subq $248, %rsp .cfi_def_cfa_offset 256 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) movq %r8, 40(%rsp) movq %r9, 32(%rsp) movq 256(%rsp), %rax movq %rax, 24(%rsp) movq 264(%rsp), %rax movq %rax, 16(%rsp) movq 272(%rsp), %rax movq %rax, 8(%rsp) movq %fs:40, %rax movq %rax, 232(%rsp) xorl %eax, %eax leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rax movq %rax, 160(%rsp) leaq 48(%rsp), %rax movq %rax, 168(%rsp) leaq 40(%rsp), %rax movq %rax, 176(%rsp) leaq 32(%rsp), %rax movq %rax, 184(%rsp) leaq 24(%rsp), %rax movq %rax, 192(%rsp) leaq 16(%rsp), %rax movq %rax, 200(%rsp) leaq 8(%rsp), %rax movq %rax, 208(%rsp) leaq 280(%rsp), %rax movq %rax, 216(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl $1, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) leaq 88(%rsp), %rcx leaq 80(%rsp), %rdx leaq 108(%rsp), %rsi leaq 96(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 232(%rsp), %rax subq %fs:40, %rax jne .L8 addq $248, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 88(%rsp) .cfi_def_cfa_offset 264 pushq 88(%rsp) .cfi_def_cfa_offset 272 leaq 160(%rsp), %r9 movq 
124(%rsp), %rcx movl 132(%rsp), %r8d movq 112(%rsp), %rsi movl 120(%rsp), %edx leaq obtainPolygonsSteps(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 256 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i, .-_Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i .globl obtainPolygonsSteps .type obtainPolygonsSteps, @function obtainPolygonsSteps: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 pushq 40(%rsp) .cfi_def_cfa_offset 32 pushq 40(%rsp) .cfi_def_cfa_offset 40 pushq 40(%rsp) .cfi_def_cfa_offset 48 call _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i addq $40, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size obtainPolygonsSteps, .-obtainPolygonsSteps .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "obtainPolygonsSteps" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq obtainPolygonsSteps(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type 
_ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsSteps(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,float* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ //__shared__ float observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } /* float to2d[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2d[2*i] = 0.6 * observerCoord[3*i+2] / observerCoord[3*i]; to2d[2*i+1] = 0.6 * observerCoord[3*i+1] / observerCoord[3*i]; } // ---------------to2dmm---------------------------------------------- double to2dmm[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2dmm[2*i] = 160 * to2d[2*i]; to2dmm[2*i+1] = 160 * to2d[2*i+1]; } // -----------------------spX, spY-------------------------------------- int spX[qPoints]; int spY[qPoints]; for (int i = 0; i < qPoints; i++) { spX[i] = (int) (4.35 * to2dmm[2*i]); spY[i] = (int)(4.35 * to2dmm[2*i+1]); } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = spX[i] + 350; yp[i] = -spY[i] + 350; } */ int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } float NormalObs[3*qPolygons]; 
for (int i = 0; i < qPolygons; i++) { NormalObs[3*i] = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]; NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] *dev_matrixC[6]; NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]; } for (int i = 0; i < qPolygons; i++) { if ( ( NormalObs[3*i] * observerCoord[3*dev_polygons[3*i]] + NormalObs[3*i+1] *observerCoord[3*dev_polygons[3*i]+1] + NormalObs[3*i+2] * observerCoord[3*dev_polygons[3*i]+2] )<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
#include <hip/hip_runtime.h> extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsSteps(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,float* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ //__shared__ float observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } /* float to2d[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2d[2*i] = 0.6 * observerCoord[3*i+2] / observerCoord[3*i]; to2d[2*i+1] = 0.6 * observerCoord[3*i+1] / observerCoord[3*i]; } // ---------------to2dmm---------------------------------------------- double to2dmm[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2dmm[2*i] = 160 * to2d[2*i]; to2dmm[2*i+1] = 160 * to2d[2*i+1]; } // -----------------------spX, spY-------------------------------------- int spX[qPoints]; int spY[qPoints]; for (int i = 0; i < qPoints; i++) { spX[i] = (int) (4.35 * to2dmm[2*i]); spY[i] = (int)(4.35 * to2dmm[2*i+1]); } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = spX[i] + 350; yp[i] = -spY[i] + 350; } */ int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } 
float NormalObs[3*qPolygons]; for (int i = 0; i < qPolygons; i++) { NormalObs[3*i] = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]; NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] *dev_matrixC[6]; NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]; } for (int i = 0; i < qPolygons; i++) { if ( ( NormalObs[3*i] * observerCoord[3*dev_polygons[3*i]] + NormalObs[3*i+1] *observerCoord[3*dev_polygons[3*i]+1] + NormalObs[3*i+2] * observerCoord[3*dev_polygons[3*i]+2] )<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsSteps(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,float* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ //__shared__ float observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } /* float to2d[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2d[2*i] = 0.6 * observerCoord[3*i+2] / observerCoord[3*i]; to2d[2*i+1] = 0.6 * observerCoord[3*i+1] / observerCoord[3*i]; } // ---------------to2dmm---------------------------------------------- double to2dmm[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2dmm[2*i] = 160 * to2d[2*i]; to2dmm[2*i+1] = 160 * to2d[2*i+1]; } // -----------------------spX, spY-------------------------------------- int spX[qPoints]; int spY[qPoints]; for (int i = 0; i < qPoints; i++) { spX[i] = (int) (4.35 * to2dmm[2*i]); spY[i] = (int)(4.35 * to2dmm[2*i+1]); } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = spX[i] + 350; yp[i] = -spY[i] + 350; } */ int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } 
float NormalObs[3*qPolygons]; for (int i = 0; i < qPolygons; i++) { NormalObs[3*i] = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]; NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] *dev_matrixC[6]; NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]; } for (int i = 0; i < qPolygons; i++) { if ( ( NormalObs[3*i] * observerCoord[3*dev_polygons[3*i]] + NormalObs[3*i+1] *observerCoord[3*dev_polygons[3*i]+1] + NormalObs[3*i+2] * observerCoord[3*dev_polygons[3*i]+2] )<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected obtainPolygonsSteps .globl obtainPolygonsSteps .p2align 8 .type obtainPolygonsSteps,@function obtainPolygonsSteps: s_load_b32 s2, s[0:1], 0x48 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e32 vcc_lo, s2, v0 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_11 s_load_b256 s[4:11], s[0:1], 0x18 v_lshlrev_b32_e32 v2, 2, v0 s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b32 v1, v2, s[4:5] global_load_b32 v2, v2, s[6:7] s_clause 0x1 s_load_b256 s[12:19], s[8:9], 0x0 s_load_b128 s[4:7], s[8:9], 0x20 s_add_u32 s2, s10, 4 s_addc_u32 s3, s11, 0 s_mov_b32 s10, 0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_2: s_add_u32 s20, s2, -4 s_addc_u32 s21, s3, -1 v_add_nc_u32_e64 v3, s10, 16 s_clause 0x1 s_load_b32 s11, s[20:21], 0x0 s_load_b64 s[20:21], s[2:3], 0x0 s_waitcnt lgkmcnt(0) v_cvt_f32_i32_e32 v5, s20 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mul_f32 v7, s13, v5 :: v_dual_add_nc_u32 v6, s21, v2 v_cvt_f32_i32_e32 v6, v6 v_add_nc_u32_e32 v4, s11, v1 s_add_i32 s11, s10, 16 s_add_i32 s10, s10, 12 s_add_u32 s2, s2, 12 s_addc_u32 s3, s3, 0 v_cvt_f32_i32_e32 v4, v4 v_mul_f32_e32 v8, s17, v5 s_cmpk_eq_i32 s10, 0x2b8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmac_f32_e32 v7, s12, v4 v_dual_mul_f32 v5, s5, v5 :: v_dual_fmac_f32 v8, s16, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmac_f32_e32 v7, s14, v6 v_dual_fmac_f32 v5, s4, v4 :: v_dual_fmac_f32 v8, s18, v6 v_add_nc_u32_e32 v4, 4, v3 v_add_nc_u32_e32 v3, 8, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_dual_fmac_f32 v5, s6, v6 :: v_dual_add_f32 v6, s15, v7 v_add_f32_e32 v7, s19, v8 s_delay_alu instid0(VALU_DEP_2) v_add_f32_e32 v5, s7, v5 s_clause 0x2 scratch_store_b32 off, v6, s11 scratch_store_b32 v4, v7, off scratch_store_b32 v3, v5, off s_cbranch_scc0 .LBB0_2 s_set_inst_prefetch_distance 0x2 v_or_b32_e64 v1, 16, 4 s_mov_b32 
s3, 0 .LBB0_4: s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v2, 4, v1 s_add_i32 s4, s3, 0x3c0 s_clause 0x2 scratch_load_b32 v2, v2, off scratch_load_b32 v3, v1, off scratch_load_b32 v4, v1, off offset:-4 s_waitcnt vmcnt(2) v_dual_mul_f32 v2, 0x43d0cccd, v2 :: v_dual_add_nc_u32 v1, 12, v1 s_waitcnt vmcnt(1) v_mul_f32_e32 v3, 0x43d0cccd, v3 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_scale_f32 v5, null, v4, v4, v2 v_div_scale_f32 v6, null, v4, v4, v3 v_div_scale_f32 v12, s2, v3, v4, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_rcp_f32_e32 v7, v5 v_rcp_f32_e32 v8, v6 s_waitcnt_depctr 0xfff v_fma_f32 v10, -v5, v7, 1.0 v_fma_f32 v11, -v6, v8, 1.0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fmac_f32_e32 v7, v10, v7 v_div_scale_f32 v9, vcc_lo, v2, v4, v2 v_fmac_f32_e32 v8, v11, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mul_f32 v10, v9, v7 :: v_dual_mul_f32 v11, v12, v8 v_fma_f32 v13, -v5, v10, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v14, -v6, v11, v12 v_dual_fmac_f32 v10, v13, v7 :: v_dual_fmac_f32 v11, v14, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v5, -v5, v10, v9 v_fma_f32 v6, -v6, v11, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_div_fmas_f32 v5, v5, v7, v10 s_mov_b32 vcc_lo, s2 s_add_i32 s2, s3, 0x2d0 v_div_fmas_f32 v6, v6, v8, v11 s_add_i32 s3, s3, 4 v_div_fixup_f32 v2, v5, v4, v2 s_cmpk_eq_i32 s3, 0xe8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_fixup_f32 v3, v6, v4, v3 v_cvt_i32_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_i32_f32_e32 v3, v3 v_add_nc_u32_e32 v2, 0x15e, v2 s_delay_alu instid0(VALU_DEP_2) v_sub_nc_u32_e32 v3, 0x15e, v3 scratch_store_b32 off, v2, s2 scratch_store_b32 off, v3, s4 s_cbranch_scc0 
.LBB0_4 s_load_b64 s[14:15], s[0:1], 0x40 s_clause 0x5 s_load_b64 s[2:3], s[8:9], 0x0 s_load_b32 s10, s[8:9], 0x8 s_load_b64 s[4:5], s[8:9], 0x10 s_load_b32 s11, s[8:9], 0x18 s_load_b64 s[6:7], s[8:9], 0x20 s_load_b32 s12, s[8:9], 0x28 s_mov_b32 s13, 0 s_waitcnt lgkmcnt(0) s_add_u32 s8, s14, 4 s_addc_u32 s9, s15, 0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_6: s_add_u32 s14, s8, -4 s_addc_u32 s15, s9, -1 s_clause 0x1 s_load_b64 s[16:17], s[8:9], 0x0 s_load_b32 s14, s[14:15], 0x0 v_add_nc_u32_e64 v1, 0x4b0, s13 s_add_i32 s15, s13, 0x4b0 s_add_i32 s13, s13, 12 s_add_u32 s8, s8, 12 s_addc_u32 s9, s9, 0 s_cmpk_eq_i32 s13, 0x480 s_waitcnt lgkmcnt(0) v_cvt_f32_i32_e32 v2, s16 v_cvt_f32_i32_e32 v3, s14 v_cvt_f32_i32_e32 v6, s17 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_f32_e32 v5, s5, v2 v_mul_f32_e32 v4, s3, v2 v_dual_mul_f32 v2, s7, v2 :: v_dual_fmac_f32 v5, s4, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmac_f32_e32 v4, s2, v3 v_dual_fmac_f32 v2, s6, v3 :: v_dual_add_nc_u32 v3, 4, v1 v_add_nc_u32_e32 v1, 8, v1 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_fmac_f32_e32 v5, s11, v6 v_fmac_f32_e32 v4, s10, v6 s_delay_alu instid0(VALU_DEP_4) v_fmac_f32_e32 v2, s12, v6 s_clause 0x2 scratch_store_b32 off, v4, s15 scratch_store_b32 v3, v5, off scratch_store_b32 v1, v2, off s_cbranch_scc0 .LBB0_6 s_set_inst_prefetch_distance 0x2 s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b64 s[2:3], s[0:1], 0x38 v_mul_u32_u24_e32 v1, 0x60, v0 v_mov_b32_e32 v3, 0 s_mov_b32 s6, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b32_e32 v1, 2, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v1, s4, s4, v1 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v2, null, s5, 0, s4 s_mov_b64 s[4:5], s[2:3] s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_8: global_load_b32 v4, v3, s[4:5] v_add_nc_u32_e64 v5, 0x4b0, s6 s_add_i32 s7, s6, 0x4b0 s_add_i32 s6, s6, 12 
scratch_load_b32 v6, off, s7 s_add_u32 s4, s4, 12 v_add_nc_u32_e32 v7, 4, v5 v_add_nc_u32_e32 v5, 8, v5 s_addc_u32 s5, s5, 0 s_cmpk_eq_i32 s6, 0x480 s_clause 0x1 scratch_load_b32 v7, v7, off scratch_load_b32 v5, v5, off s_waitcnt vmcnt(3) v_mul_lo_u32 v4, v4, 12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v8, 16, v4 v_add_nc_u32_e32 v4, 16, v4 v_add_nc_u32_e32 v9, 4, v8 v_add_nc_u32_e32 v8, 8, v8 s_clause 0x2 scratch_load_b32 v9, v9, off scratch_load_b32 v4, v4, off scratch_load_b32 v8, v8, off s_waitcnt vmcnt(2) v_mul_f32_e32 v7, v7, v9 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fmac_f32_e32 v7, v6, v4 s_waitcnt vmcnt(0) v_fmac_f32_e32 v7, v5, v8 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_f32_e32 vcc_lo, 0, v7 v_cndmask_b32_e64 v4, 0, 1, vcc_lo global_store_b32 v[1:2], v4, off v_add_co_u32 v1, vcc_lo, v1, 4 v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo s_cbranch_scc0 .LBB0_8 s_set_inst_prefetch_distance 0x2 s_load_b128 s[4:7], s[0:1], 0x8 v_mul_u32_u24_e32 v0, 0x120, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_dual_mov_b32 v0, 0 :: v_dual_lshlrev_b32 v3, 2, v0 s_waitcnt lgkmcnt(0) v_add_co_u32 v1, s0, s6, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v2, null, s7, 0, s0 v_add_co_u32 v3, s0, s4, v3 v_add_co_ci_u32_e64 v4, null, s5, 0, s0 s_mov_b64 s[0:1], 0 .LBB0_10: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) s_add_u32 s4, s2, s0 s_addc_u32 s5, s3, s1 global_load_b32 v5, v0, s[4:5] s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v5, 2, v5 v_add_nc_u32_e32 v5, 0x2d0, v5 scratch_load_b32 v7, v5, off v_add_co_u32 v5, vcc_lo, v3, s0 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v4, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off global_load_b32 v7, v0, s[4:5] offset:4 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v7, 2, v7 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v7, 
0x2d0, v7 scratch_load_b32 v7, v7, off s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off offset:4 global_load_b32 v7, v0, s[4:5] offset:8 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v7, 2, v7 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v7, 0x2d0, v7 scratch_load_b32 v7, v7, off s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off offset:8 global_load_b32 v5, v0, s[4:5] s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v5, 2, v5 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v5, 0x3c0, v5 scratch_load_b32 v7, v5, off v_add_co_u32 v5, vcc_lo, v1, s0 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v2, vcc_lo s_add_u32 s0, s0, 12 s_addc_u32 s1, s1, 0 s_cmpk_eq_i32 s0, 0x480 s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off global_load_b32 v7, v0, s[4:5] offset:4 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v7, 2, v7 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v7, 0x3c0, v7 scratch_load_b32 v7, v7, off s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off offset:4 global_load_b32 v7, v0, s[4:5] offset:8 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v7, 2, v7 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v7, 0x3c0, v7 scratch_load_b32 v7, v7, off s_waitcnt vmcnt(0) global_store_b32 v[5:6], v7, off offset:8 s_cbranch_scc0 .LBB0_10 .LBB0_11: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel obtainPolygonsSteps .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 2352 .amdhsa_kernarg_size 76 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 1 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 22 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size obtainPolygonsSteps, .Lfunc_end0-obtainPolygonsSteps .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 40 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 48 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 56 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 64 .size: 8 .value_kind: global_buffer - .offset: 72 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 76 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: obtainPolygonsSteps .private_segment_fixed_size: 2352 .sgpr_count: 
24 .sgpr_spill_count: 0 .symbol: obtainPolygonsSteps.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsSteps(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,float* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ //__shared__ float observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } /* float to2d[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2d[2*i] = 0.6 * observerCoord[3*i+2] / observerCoord[3*i]; to2d[2*i+1] = 0.6 * observerCoord[3*i+1] / observerCoord[3*i]; } // ---------------to2dmm---------------------------------------------- double to2dmm[2*qPoints]; for (int i = 0; i < qPoints; i++) { to2dmm[2*i] = 160 * to2d[2*i]; to2dmm[2*i+1] = 160 * to2d[2*i+1]; } // -----------------------spX, spY-------------------------------------- int spX[qPoints]; int spY[qPoints]; for (int i = 0; i < qPoints; i++) { spX[i] = (int) (4.35 * to2dmm[2*i]); spY[i] = (int)(4.35 * to2dmm[2*i+1]); } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = spX[i] + 350; yp[i] = -spY[i] + 350; } */ int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } 
float NormalObs[3*qPolygons]; for (int i = 0; i < qPolygons; i++) { NormalObs[3*i] = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]; NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] *dev_matrixC[6]; NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]; } for (int i = 0; i < qPolygons; i++) { if ( ( NormalObs[3*i] * observerCoord[3*dev_polygons[3*i]] + NormalObs[3*i+1] *observerCoord[3*dev_polygons[3*i]+1] + NormalObs[3*i+2] * observerCoord[3*dev_polygons[3*i]+2] )<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
.text .file "obtainPolygonsSteps.hip" .globl __device_stub__obtainPolygonsSteps # -- Begin function __device_stub__obtainPolygonsSteps .p2align 4, 0x90 .type __device_stub__obtainPolygonsSteps,@function __device_stub__obtainPolygonsSteps: # @__device_stub__obtainPolygonsSteps .cfi_startproc # %bb.0: subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 192(%rsp), %rax movq %rax, 144(%rsp) leaq 200(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 216(%rsp), %rax movq %rax, 168(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $obtainPolygonsSteps, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $200, %rsp .cfi_adjust_cfa_offset -200 retq .Lfunc_end0: .size __device_stub__obtainPolygonsSteps, .Lfunc_end0-__device_stub__obtainPolygonsSteps .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $obtainPolygonsSteps, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl 
$__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type obtainPolygonsSteps,@object # @obtainPolygonsSteps .section .rodata,"a",@progbits .globl obtainPolygonsSteps .p2align 3, 0x0 obtainPolygonsSteps: .quad __device_stub__obtainPolygonsSteps .size obtainPolygonsSteps, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "obtainPolygonsSteps" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__obtainPolygonsSteps .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym obtainPolygonsSteps .addrsig_sym __hip_fatbin .addrsig_sym 
__hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0003974c_00000000-6_obtainPolygonsSteps.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i .type _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i, @function _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i: .LFB2051: .cfi_startproc endbr64 subq $248, %rsp .cfi_def_cfa_offset 256 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) movq %r8, 40(%rsp) movq %r9, 32(%rsp) movq 256(%rsp), %rax movq %rax, 24(%rsp) movq 264(%rsp), %rax movq %rax, 16(%rsp) movq 272(%rsp), %rax movq %rax, 8(%rsp) movq %fs:40, %rax movq %rax, 232(%rsp) xorl %eax, %eax leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rax movq %rax, 160(%rsp) leaq 48(%rsp), %rax movq %rax, 168(%rsp) leaq 40(%rsp), %rax movq %rax, 176(%rsp) leaq 32(%rsp), %rax movq %rax, 184(%rsp) leaq 24(%rsp), %rax movq %rax, 192(%rsp) leaq 16(%rsp), %rax movq %rax, 200(%rsp) leaq 8(%rsp), %rax movq %rax, 208(%rsp) leaq 280(%rsp), %rax movq %rax, 216(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl $1, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) leaq 88(%rsp), %rcx leaq 80(%rsp), %rdx leaq 108(%rsp), %rsi leaq 96(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 232(%rsp), %rax subq %fs:40, %rax jne .L8 addq $248, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 88(%rsp) .cfi_def_cfa_offset 264 pushq 88(%rsp) .cfi_def_cfa_offset 272 leaq 160(%rsp), %r9 movq 
124(%rsp), %rcx movl 132(%rsp), %r8d movq 112(%rsp), %rsi movl 120(%rsp), %edx leaq obtainPolygonsSteps(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 256 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i, .-_Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i .globl obtainPolygonsSteps .type obtainPolygonsSteps, @function obtainPolygonsSteps: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 pushq 40(%rsp) .cfi_def_cfa_offset 32 pushq 40(%rsp) .cfi_def_cfa_offset 40 pushq 40(%rsp) .cfi_def_cfa_offset 48 call _Z56__device_stub__Z19obtainPolygonsStepsPiS_S_S_S_PfS_S_S_iPiS_S_S_S_PfS_S_S_i addq $40, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size obtainPolygonsSteps, .-obtainPolygonsSteps .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "obtainPolygonsSteps" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq obtainPolygonsSteps(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type 
_ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "obtainPolygonsSteps.hip" .globl __device_stub__obtainPolygonsSteps # -- Begin function __device_stub__obtainPolygonsSteps .p2align 4, 0x90 .type __device_stub__obtainPolygonsSteps,@function __device_stub__obtainPolygonsSteps: # @__device_stub__obtainPolygonsSteps .cfi_startproc # %bb.0: subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 192(%rsp), %rax movq %rax, 144(%rsp) leaq 200(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 216(%rsp), %rax movq %rax, 168(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $obtainPolygonsSteps, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $200, %rsp .cfi_adjust_cfa_offset -200 retq .Lfunc_end0: .size __device_stub__obtainPolygonsSteps, .Lfunc_end0-__device_stub__obtainPolygonsSteps .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $obtainPolygonsSteps, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl 
$__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type obtainPolygonsSteps,@object # @obtainPolygonsSteps .section .rodata,"a",@progbits .globl obtainPolygonsSteps .p2align 3, 0x0 obtainPolygonsSteps: .quad __device_stub__obtainPolygonsSteps .size obtainPolygonsSteps, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "obtainPolygonsSteps" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__obtainPolygonsSteps .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym obtainPolygonsSteps .addrsig_sym __hip_fatbin .addrsig_sym 
__hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <cmath> #include <vector> #include <algorithm> #include <chrono> #include <random> #include <atomic> #include <stdio.h> #define NUM_STREAMS 2 using namespace std; mt19937 rng; random_device rd; __managed__ int n, l, r, s; class Particle { public: operator string() const { char buffer [200]; snprintf(buffer, 200, "%d %.8lf %.8lf %.8lf %.8lf", i, x, y, vX, vY); return buffer; } int i; double x; double y; double vX; double vY; int pColl; int wColl; Particle() {}; Particle(int i, double x, double y, double vX, double vY, int l) { this -> i = i; this -> x = x; this -> y = y; this -> vX = vX; this -> vY = vY; this -> pColl = 0; this -> wColl = 0; } __device__ __host__ int getIndex() { return this->i; } string getFullRepresentation() { char buffer[200]; snprintf(buffer, 200, "%d %.8lf %.8lf %.8lf %.8lf %d %d", i, x, y, vX, vY, pColl, wColl); string res(buffer); return res; } }; class CollisionEvent { public: const static int WALL = 0; const static int PARTICLE = 1; const static int NONE = 2; Particle* first; Particle* second; double time; int type; __host__ __device__ bool operator == (CollisionEvent other) { int firstIndex = (*this->first).getIndex(); int otherFirstIndex = (*other.first).getIndex(); if (this->second != nullptr) { int secondIndex = (*this->second).getIndex(); int otherSecondIndex = (*other.second).getIndex(); return (firstIndex == otherSecondIndex && secondIndex == otherFirstIndex) || (firstIndex == otherFirstIndex && secondIndex == otherSecondIndex); } return (firstIndex == otherFirstIndex); } __host__ __device__ bool operator < (CollisionEvent other) { if (this->time == other.getTime()) return this->getSmallestIndex() > other.getSmallestIndex(); return this->time > other.getTime(); } __host__ __device__ CollisionEvent() {} __host__ __device__ virtual ~CollisionEvent() {} // wall collision event __host__ __device__ CollisionEvent(Particle* first, double time) { this->first = first; this->second = nullptr; this->time = time; 
this->type = this->WALL; } // particle collision event __host__ __device__ CollisionEvent(Particle* first, Particle* second, double time) { this->first = first; this->second = second; this->time = time; this->type = this->PARTICLE; } // no collision event __host__ __device__ CollisionEvent(Particle* first) { this->first = first; this->second = nullptr; this->time = 1.0; this->type = this->NONE; } __host__ __device__ double getTime() { return this->time; } __host__ __device__ int getType() { return this->type; } __host__ __device__ double getSmallestIndex() { if (this->second != nullptr) { return (*first).getIndex() < (*second).getIndex() ? (*first).getIndex() : (*second).getIndex(); } return (*first).getIndex(); } }; // cuda streams cudaStream_t streams[NUM_STREAMS]; // particle objects __managed__ Particle* particles; // collision times __managed__ double** particleCollisionTimes; __managed__ double* wallCollisionTimes; // collision events __managed__ CollisionEvent* temp; __managed__ int* found; __managed__ CollisionEvent** particleCollisions; __managed__ int particleCollisionsCount; __managed__ CollisionEvent** wallCollisions; __managed__ int wallCollisionsCount; __managed__ CollisionEvent** noCollisions; __managed__ int noCollisionsCount; // function headers __host__ void moveParticles(Particle* particles); __global__ void findEarliestCollision(); __global__ void timeParticleCollision(); __global__ void timeWallCollision(); __global__ void executeParticleCollision(); __global__ void executeWallCollision(); __global__ void executeNoCollision(); __host__ int main (void) { string command; cin >> n >> l >> r >> s >> command; rng.seed(rd()); uniform_real_distribution<double> pos(r, l-r); uniform_real_distribution<double> velocity((double)l/(8*r), (double)l/4); cudaError_t cudaStatus = cudaMallocManaged(&particles, sizeof(Particle) * n); int scanned; for (scanned = 0; scanned < n; ++scanned) { int index; double x; double y; double vX; double vY; int count; count = 
scanf("%d %lf %lf %lf %lf", &index, &x, &y, &vX, &vY); if (count == EOF || count <= 0) break; particles[scanned] = Particle(index, x, y, vX, vY, l); } for (int j = scanned; j < n; j++) { double x = pos(rng); double y = pos(rng); double vX = velocity(rng); double vY = velocity(rng); particles[scanned] = Particle(j, x, y, vX, vY, l); } for (int i = 0; i < NUM_STREAMS; ++i) { cudaStreamCreate(&streams[i]); } cudaMallocManaged(&particleCollisionTimes, sizeof(double*) * n); for (int i = 0; i < n; ++i) { cudaMallocManaged(&particleCollisionTimes[i], sizeof(double) * n); } cudaMallocManaged(&wallCollisionTimes, sizeof(double) * n); cudaMallocManaged(&found, sizeof(int) * n); cudaMallocManaged(&particleCollisions, sizeof(CollisionEvent*) * n); cudaMallocManaged((void**) &particleCollisionsCount, sizeof(int)); cudaMallocManaged(&wallCollisions, sizeof(CollisionEvent*) * n); cudaMallocManaged((void**) &wallCollisionsCount, sizeof(int)); cudaMallocManaged(&noCollisions, sizeof(CollisionEvent*) * n); cudaMallocManaged((void**) &noCollisionsCount, sizeof(int)); cudaMallocManaged(&temp, sizeof(CollisionEvent) * n); auto start = chrono::high_resolution_clock::now(); for (int i = 0; i < s; ++i) { if (!command.compare("print")) { for (int j = 0; j < n; ++j) { cout << i << " " << (string) particles[j] << endl; } } moveParticles(particles); } auto finish = std::chrono::high_resolution_clock::now(); for (int j = 0; j < n; ++j) { cout << s << " " << particles[j].getFullRepresentation() << endl; } double timeTaken = (double)chrono::duration_cast<chrono::nanoseconds>(finish-start).count()/1000000000; // printf("Time taken: %.5f s\n", timeTaken); return 0; } __host__ void moveParticles(Particle* particles) { for (int i = 0; i < n; ++i) found[i] = -1; particleCollisionsCount = 0; wallCollisionsCount = 0; noCollisionsCount = 0; // calculate collision times timeWallCollision<<<(n-1)/64+1, 64, 0, streams[0]>>>(); dim3 threadsPerBlock(16, 16, 1); dim3 blocksPerGrid((n-1)/16 + 1, (n-1)/16 + 1); 
timeParticleCollision<<<blocksPerGrid, threadsPerBlock, 0, streams[1]>>>(); cudaDeviceSynchronize(); // find valid collisions int foundCount = 0; while (foundCount != n) { findEarliestCollision<<<(n-1)/64+1,64>>>(); cudaDeviceSynchronize(); for (int i = 0; i < n; ++i) { if (found[i] != -1) continue; CollisionEvent* e = &temp[i]; // particle-particle collision if ((*e).getType() == CollisionEvent::PARTICLE) { int otherIndex = (*(*e).second).getIndex(); if (temp[otherIndex].getType() == CollisionEvent::PARTICLE) { if ((*e) == temp[otherIndex] && i < otherIndex) { found[i] = 0; found[otherIndex] = 0; foundCount += 2; particleCollisions[particleCollisionsCount++] = e; } } } // particle-wall collision else if ((*e).getType() == CollisionEvent::WALL) { found[i] = 0; ++foundCount; wallCollisions[wallCollisionsCount++] = e; } // no collision else { found[i] = 0; ++foundCount; noCollisions[noCollisionsCount++] = e; } } } // apply valid collisions executeParticleCollision<<<(particleCollisionsCount-1)/64+1, 64, 0, streams[0]>>>(); executeWallCollision<<<(wallCollisionsCount-1)/64+1, 64, 0, streams[1]>>>(); executeNoCollision<<<(n-1)/64+1, 64, 0, streams[1]>>>(); cudaDeviceSynchronize(); } __global__ void findEarliestCollision() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n || found[index] != -1) return; // first assume no collision temp[index] = CollisionEvent(&particles[index]); // check for particle-wall collision if (wallCollisionTimes[index] < temp[index].getTime() && wallCollisionTimes[index] < 1) { temp[index] = CollisionEvent(&particles[index], wallCollisionTimes[index]); } // check for particle-particle collision for (int j = 0; j < n; ++j) { if (index == j || found[j] != -1) continue; double time = particleCollisionTimes[index][j]; if (time > -1 && time < temp[index].getTime() && time < 1) { temp[index] = CollisionEvent(&particles[index], &particles[j], time); } } } // "input": 2 Particles // "output": Returns time taken before collision occurs 
if they collide, negative value otherwise. __global__ void timeParticleCollision() { int firstIndex = blockIdx.x * blockDim.x + threadIdx.x; int secondIndex = blockIdx.y * blockDim.y + threadIdx.y; if (firstIndex >= n || secondIndex >= n || firstIndex >= secondIndex) return; else { Particle first = particles[firstIndex]; Particle second = particles[secondIndex]; //a, b and c are as in the quadratic formula representation. //t, the time taken for the 2 circles to touch, is the unknown variable we are solving for //by taking difference in circle centres, setting an unknown t for collision time, and then taking distance moved in time t, //we can solve for t such that the circle centers are <= 2r and therefore collide. 4r^2 is to solve for radius distance. double c = pow((first.x-second.x), 2) + pow((first.y - second.y), 2) - 4*r*r; double b = 2*((first.x - second.x)*(first.vX - second.vX) + (first.y - second.y)*(first.vY-second.vY)); double a = pow((first.vX-second.vX), 2) + pow((first.vY - second.vY), 2); //check for solution double solfirst; if (b*b-4*a*c < 0) { solfirst = 100000.0; } else { //else if there is a solution, the one with smaller value should be the main collision. Second value is after the 2 circles phase through each other solfirst = (-sqrt(b*b-4*a*c)-b)/(2*a); if (solfirst < 0) { solfirst = (sqrt(b*b-4*a*c)-b)/(2*a); if (solfirst > 0) solfirst = 0; else solfirst = 100000.0; } } particleCollisionTimes[first.i][second.i] = solfirst; particleCollisionTimes[second.i][first.i] = solfirst; } } // "input": 1 Particle // "output": Returns time taken before collision occurs if it collides with wall, negative value otherwise. __global__ void timeWallCollision() { int particleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (particleIndex >=n) { return; } else { Particle particle = particles[particleIndex]; //check for x wall, y wall collisions double xCollide = particle.vX < 0 ? 
(particle.x-r)/(0-particle.vX) : ((double)l-particle.x-r)/particle.vX; double yCollide = particle.vY < 0 ? (particle.y-r)/(0-particle.vY) : ((double)l-particle.y-r)/particle.vY; wallCollisionTimes[particle.i] = fmin(xCollide, yCollide); } } __global__ void executeParticleCollision() { int particleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (particleIndex < particleCollisionsCount) { CollisionEvent *e = particleCollisions[particleIndex]; Particle *first = e->first; Particle *second = e->second; double time = e->time; if (first->getIndex() >= second->getIndex()) return; //move them to proper position first first->x += time * first->vX; first->y += time * first->vY; second->x += time * second->vX; second->y += time * second->vY; //perform collision here //find normal vector double normalX = first->x - second->x; double normalY = first->y - second->y; double normalMag = sqrt(pow(normalX, 2) + pow(normalY, 2)); normalX = normalX/normalMag; normalY = normalY/normalMag; double tangentX = -normalY; double tangentY = normalX; //compute velocity vectors wrt to normal and tangent double vFirstNormal = normalX * first->vX + normalY * first->vY; double vFirstTangent = tangentX * first->vX + tangentY * first->vY; double vSecondNormal = normalX * second->vX + normalY * second->vY; double vSecondTangent = tangentX * second->vX + tangentY * second->vY; //collision simply swaps velocities double temp = vFirstNormal; vFirstNormal = vSecondNormal; vSecondNormal = temp; first->vX = vFirstNormal * normalX + vFirstTangent * tangentX; first->vY = vFirstNormal * normalY + vFirstTangent * tangentY; second->vX = vSecondNormal * normalX + vSecondTangent * tangentX; second->vY = vSecondNormal * normalY + vSecondTangent * tangentY; //eliminate negative 0s if (first->vX == -0.0) first->vX = 0.0; if (first->vY == -0.0) first->vY = 0.0; if (second->vX == -0.0) second->vX = 0.0; if (second->vY == -0.0) second->vY = 0.0; //Continue to move them here //Check for wall collisions and stop the 
particle at wall if so double timeToMove; double xCollide = first->vX < 0 ? (first->x-r)/(0-first->vX) : ((double)l-r-first->x)/first->vX; double yCollide = first->vY < 0 ? (first->y-r)/(0-first->vY) : ((double)l-r-first->y)/first->vY; if (xCollide >= 1-time && yCollide >= 1-time) { timeToMove = 1-time; } else { timeToMove = fmin(xCollide, yCollide); } first->x += timeToMove * first->vX; first->y += timeToMove * first->vY; xCollide = second->vX < 0 ? (second->x-r)/(0-second->vX) : ((double)l-r-second->x)/second->vX; yCollide = second->vY < 0 ? (second->y-r)/(0-second->vY) : ((double)l-r-second->y)/second->vY; if (xCollide >= 1-time && yCollide >= 1-time) { timeToMove = 1-time; } else { timeToMove = fmin(xCollide, yCollide); } second->x += timeToMove * second->vX; second->y += timeToMove * second->vY; first->pColl++; second->pColl++; } } __global__ void executeWallCollision() { int particleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (particleIndex < wallCollisionsCount) { CollisionEvent *e = wallCollisions[particleIndex]; Particle *first = e->first; //check for x wall collisions //check for y wall collisions double xCollide = first->vX < 0 ? (first->x-r)/(0-first->vX) : ((double)l-first->x-r)/first->vX; double yCollide = first->vY < 0 ? 
(first->y-r)/(0-first->vY) : ((double)l-first->y-r)/first->vY; double earlierTime = fmin(xCollide, yCollide); double laterTime = fmax(xCollide, yCollide); first->x += earlierTime * first->vX; first->y += earlierTime * first->vY; //Reverse direction depending on which collision happens first if (xCollide <= yCollide) { first->vX = -first->vX; } if (yCollide <= xCollide) { first->vY = -first->vY; } //artificially set timing to allow particle to continue after hitting corner if (xCollide == yCollide) { laterTime = 1; } first->x += (fmin(1.0, laterTime)-earlierTime) * first->vX; first->y += (fmin(1.0, laterTime)-earlierTime) * first->vY; first->wColl++; } } __global__ void executeNoCollision() { int particleIndex = blockIdx.x * blockDim.x + threadIdx.x; if (particleIndex < noCollisionsCount) { CollisionEvent *e = noCollisions[particleIndex]; Particle *first = e->first; first->x += first->vX; first->y += first->vY; } }
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include <vector>
#include <algorithm>
#include <chrono>
#include <random>
#include <atomic>
#include <stdio.h>

#define NUM_STREAMS 2

using namespace std;

mt19937 rng;
random_device rd;

// Simulation parameters (unified memory so kernels read them directly):
// n = particle count, l = square box side length, r = particle radius,
// s = number of simulation steps.
__managed__ int n, l, r, s;

// A disc particle with position, per-step velocity and collision counters.
class Particle {
public:
    // "step index x y vX vY" short representation used for per-step printing.
    operator string() const {
        char buffer[200];
        snprintf(buffer, 200, "%d %.8lf %.8lf %.8lf %.8lf", i, x, y, vX, vY);
        return buffer;
    }

    int i;          // particle index
    double x;       // position
    double y;
    double vX;      // displacement per unit time step
    double vY;
    int pColl;      // particle-particle collision count
    int wColl;      // particle-wall collision count

    Particle() {};

    Particle(int i, double x, double y, double vX, double vY, int l) {
        this->i = i;
        this->x = x;
        this->y = y;
        this->vX = vX;
        this->vY = vY;
        this->pColl = 0;
        this->wColl = 0;
    }

    __device__ __host__ int getIndex() { return this->i; }

    // Full representation including collision counters, used for final output.
    string getFullRepresentation() {
        char buffer[200];
        snprintf(buffer, 200, "%d %.8lf %.8lf %.8lf %.8lf %d %d",
                 i, x, y, vX, vY, pColl, wColl);
        string res(buffer);
        return res;
    }
};

// Describes the earliest event a particle participates in during one step:
// a wall bounce, a two-particle collision, or no collision at all.
class CollisionEvent {
public:
    const static int WALL = 0;
    const static int PARTICLE = 1;
    const static int NONE = 2;

    Particle* first;
    Particle* second;   // nullptr for WALL / NONE events
    double time;        // fraction of the step at which the event occurs
    int type;

    // Two events are equal when they involve the same (unordered) pair of
    // particles, or the same single particle for wall/none events.
    // NOTE(review): assumes other.second is non-null whenever this->second is —
    // callers only compare two PARTICLE events, which satisfies this.
    __host__ __device__ bool operator == (CollisionEvent other) {
        int firstIndex = (*this->first).getIndex();
        int otherFirstIndex = (*other.first).getIndex();
        if (this->second != nullptr) {
            int secondIndex = (*this->second).getIndex();
            int otherSecondIndex = (*other.second).getIndex();
            return (firstIndex == otherSecondIndex && secondIndex == otherFirstIndex) ||
                   (firstIndex == otherFirstIndex && secondIndex == otherSecondIndex);
        }
        return (firstIndex == otherFirstIndex);
    }

    // Orders events by time (later first); ties broken by smallest index.
    __host__ __device__ bool operator < (CollisionEvent other) {
        if (this->time == other.getTime())
            return this->getSmallestIndex() > other.getSmallestIndex();
        return this->time > other.getTime();
    }

    __host__ __device__ CollisionEvent() {}
    __host__ __device__ virtual ~CollisionEvent() {}

    // wall collision event
    __host__ __device__ CollisionEvent(Particle* first, double time) {
        this->first = first;
        this->second = nullptr;
        this->time = time;
        this->type = this->WALL;
    }

    // particle collision event
    __host__ __device__ CollisionEvent(Particle* first, Particle* second, double time) {
        this->first = first;
        this->second = second;
        this->time = time;
        this->type = this->PARTICLE;
    }

    // no collision event (particle just drifts for the whole step)
    __host__ __device__ CollisionEvent(Particle* first) {
        this->first = first;
        this->second = nullptr;
        this->time = 1.0;
        this->type = this->NONE;
    }

    __host__ __device__ double getTime() { return this->time; }
    __host__ __device__ int getType() { return this->type; }

    __host__ __device__ double getSmallestIndex() {
        if (this->second != nullptr) {
            return (*first).getIndex() < (*second).getIndex()
                       ? (*first).getIndex() : (*second).getIndex();
        }
        return (*first).getIndex();
    }
};

// hip streams used to overlap independent kernel launches
hipStream_t streams[NUM_STREAMS];

// particle objects (unified memory, shared between host and device)
__managed__ Particle* particles;

// collision times: particleCollisionTimes[i][j] = time until i meets j;
// wallCollisionTimes[i] = time until i hits a wall
__managed__ double** particleCollisionTimes;
__managed__ double* wallCollisionTimes;

// per-particle candidate events plus the per-step buckets of accepted events
__managed__ CollisionEvent* temp;
__managed__ int* found;
__managed__ CollisionEvent** particleCollisions;
__managed__ int particleCollisionsCount;
__managed__ CollisionEvent** wallCollisions;
__managed__ int wallCollisionsCount;
__managed__ CollisionEvent** noCollisions;
__managed__ int noCollisionsCount;

// function headers
__host__ void moveParticles(Particle* particles);
__global__ void findEarliestCollision();
__global__ void timeParticleCollision();
__global__ void timeWallCollision();
__global__ void executeParticleCollision();
__global__ void executeWallCollision();
__global__ void executeNoCollision();

// Reads "n l r s command" then up to n particle lines from stdin, fills the
// remainder with random particles, runs s simulation steps and prints the
// final particle states.
__host__ int main (void) {
    string command;
    cin >> n >> l >> r >> s >> command;

    rng.seed(rd());
    // positions keep the whole disc inside the box; velocity range as per spec
    uniform_real_distribution<double> pos(r, l - r);
    uniform_real_distribution<double> velocity((double)l / (8 * r), (double)l / 4);

    hipError_t cudaStatus = hipMallocManaged(&particles, sizeof(Particle) * n);
    // BUGFIX: the original captured the status but never checked it.
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMallocManaged failed: %s\n", hipGetErrorString(cudaStatus));
        return 1;
    }

    // Read explicitly-specified particles until EOF / parse failure.
    int scanned;
    for (scanned = 0; scanned < n; ++scanned) {
        int index;
        double x, y, vX, vY;
        int count = scanf("%d %lf %lf %lf %lf", &index, &x, &y, &vX, &vY);
        if (count == EOF || count <= 0) break;
        particles[scanned] = Particle(index, x, y, vX, vY, l);
    }
    // Generate the remaining particles randomly.
    // BUGFIX: the original stored every generated particle into
    // particles[scanned], overwriting one slot and leaving the rest
    // uninitialized; they belong at particles[j].
    for (int j = scanned; j < n; j++) {
        double x = pos(rng);
        double y = pos(rng);
        double vX = velocity(rng);
        double vY = velocity(rng);
        particles[j] = Particle(j, x, y, vX, vY, l);
    }

    for (int i = 0; i < NUM_STREAMS; ++i) {
        hipStreamCreate(&streams[i]);
    }

    hipMallocManaged(&particleCollisionTimes, sizeof(double*) * n);
    for (int i = 0; i < n; ++i) {
        hipMallocManaged(&particleCollisionTimes[i], sizeof(double) * n);
    }
    hipMallocManaged(&wallCollisionTimes, sizeof(double) * n);
    hipMallocManaged(&found, sizeof(int) * n);
    hipMallocManaged(&particleCollisions, sizeof(CollisionEvent*) * n);
    hipMallocManaged(&wallCollisions, sizeof(CollisionEvent*) * n);
    hipMallocManaged(&noCollisions, sizeof(CollisionEvent*) * n);
    hipMallocManaged(&temp, sizeof(CollisionEvent) * n);
    // BUGFIX: the original additionally did
    //   hipMallocManaged((void**)&particleCollisionsCount, sizeof(int));
    // (and likewise for the other two counters). Those are __managed__ int
    // scalars, not pointers — the call wrote an 8-byte device pointer over a
    // 4-byte int. The counters need no allocation at all; removed.

    auto start = chrono::high_resolution_clock::now();
    for (int i = 0; i < s; ++i) {
        if (!command.compare("print")) {
            for (int j = 0; j < n; ++j) {
                cout << i << " " << (string) particles[j] << endl;
            }
        }
        moveParticles(particles);
    }
    auto finish = std::chrono::high_resolution_clock::now();

    for (int j = 0; j < n; ++j) {
        cout << s << " " << particles[j].getFullRepresentation() << endl;
    }

    double timeTaken =
        (double)chrono::duration_cast<chrono::nanoseconds>(finish - start).count() / 1000000000;
    (void)timeTaken;  // kept for the optional timing printout below
    // printf("Time taken: %.5f s\n", timeTaken);
    return 0;
}

// Advances the whole system by one unit time step:
// 1) compute all pairwise and wall collision times on the GPU,
// 2) iteratively pick, per particle, its earliest *valid* event (an event is
//    valid once both participants agree it is their earliest),
// 3) execute all accepted events in parallel.
__host__ void moveParticles(Particle* particles) {
    for (int i = 0; i < n; ++i) found[i] = -1;
    particleCollisionsCount = 0;
    wallCollisionsCount = 0;
    noCollisionsCount = 0;

    // calculate collision times (two independent kernels on separate streams)
    timeWallCollision<<<(n - 1) / 64 + 1, 64, 0, streams[0]>>>();
    dim3 threadsPerBlock(16, 16, 1);
    dim3 blocksPerGrid((n - 1) / 16 + 1, (n - 1) / 16 + 1);
    timeParticleCollision<<<blocksPerGrid, threadsPerBlock, 0, streams[1]>>>();
    hipDeviceSynchronize();

    // find valid collisions; loop until every particle has a resolved event
    int foundCount = 0;
    while (foundCount != n) {
        findEarliestCollision<<<(n - 1) / 64 + 1, 64>>>();
        hipDeviceSynchronize();
        for (int i = 0; i < n; ++i) {
            if (found[i] != -1) continue;
            CollisionEvent* e = &temp[i];
            if ((*e).getType() == CollisionEvent::PARTICLE) {
                // accept a pair collision only when both particles chose each
                // other; the i < otherIndex check records the pair once
                int otherIndex = (*(*e).second).getIndex();
                if (temp[otherIndex].getType() == CollisionEvent::PARTICLE) {
                    if ((*e) == temp[otherIndex] && i < otherIndex) {
                        found[i] = 0;
                        found[otherIndex] = 0;
                        foundCount += 2;
                        particleCollisions[particleCollisionsCount++] = e;
                    }
                }
            }
            else if ((*e).getType() == CollisionEvent::WALL) {
                found[i] = 0;
                ++foundCount;
                wallCollisions[wallCollisionsCount++] = e;
            }
            else {
                found[i] = 0;
                ++foundCount;
                noCollisions[noCollisionsCount++] = e;
            }
        }
    }

    // apply valid collisions
    executeParticleCollision<<<(particleCollisionsCount - 1) / 64 + 1, 64, 0, streams[0]>>>();
    executeWallCollision<<<(wallCollisionsCount - 1) / 64 + 1, 64, 0, streams[1]>>>();
    executeNoCollision<<<(n - 1) / 64 + 1, 64, 0, streams[1]>>>();
    hipDeviceSynchronize();
}

// For each unresolved particle, writes into temp[index] the earliest event it
// can take part in, considering only other still-unresolved particles.
__global__ void findEarliestCollision() {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n || found[index] != -1) return;

    // first assume no collision
    temp[index] = CollisionEvent(&particles[index]);

    // check for particle-wall collision within this step
    if (wallCollisionTimes[index] < temp[index].getTime() && wallCollisionTimes[index] < 1) {
        temp[index] = CollisionEvent(&particles[index], wallCollisionTimes[index]);
    }

    // check for an earlier particle-particle collision
    for (int j = 0; j < n; ++j) {
        if (index == j || found[j] != -1) continue;
        double time = particleCollisionTimes[index][j];
        if (time > -1 && time < temp[index].getTime() && time < 1) {
            temp[index] = CollisionEvent(&particles[index], &particles[j], time);
        }
    }
}

// One thread per (firstIndex, secondIndex) pair with firstIndex < secondIndex.
// Solves the quadratic |Δp + t·Δv|² = (2r)² for the earliest touch time t and
// stores it symmetrically (100000.0 acts as "never collides").
__global__ void timeParticleCollision() {
    int firstIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int secondIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if (firstIndex >= n || secondIndex >= n || firstIndex >= secondIndex) return;

    Particle first = particles[firstIndex];
    Particle second = particles[secondIndex];

    // a, b and c are as in the quadratic formula representation.
    // t, the time taken for the 2 circles to touch, is the unknown we solve
    // for: take the difference in circle centres, add t times the relative
    // velocity, and require the distance to equal 2r (hence the 4r^2 term).
    double c = pow((first.x - second.x), 2) + pow((first.y - second.y), 2) - 4 * r * r;
    double b = 2 * ((first.x - second.x) * (first.vX - second.vX) +
                    (first.y - second.y) * (first.vY - second.vY));
    double a = pow((first.vX - second.vX), 2) + pow((first.vY - second.vY), 2);

    double solfirst;
    if (b * b - 4 * a * c < 0) {
        // no real solution: the particles never touch
        solfirst = 100000.0;
    }
    else {
        // the smaller root is the first touch; the larger one is when the
        // circles would phase through each other
        solfirst = (-sqrt(b * b - 4 * a * c) - b) / (2 * a);
        if (solfirst < 0) {
            solfirst = (sqrt(b * b - 4 * a * c) - b) / (2 * a);
            if (solfirst > 0) solfirst = 0;   // already overlapping
            else solfirst = 100000.0;          // collision is in the past
        }
    }
    particleCollisionTimes[first.i][second.i] = solfirst;
    particleCollisionTimes[second.i][first.i] = solfirst;
}

// One thread per particle: time until the particle's rim reaches the nearest
// x or y wall, assuming straight-line motion.
__global__ void timeWallCollision() {
    int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (particleIndex >= n) return;

    Particle particle = particles[particleIndex];
    double xCollide = particle.vX < 0 ? (particle.x - r) / (0 - particle.vX)
                                      : ((double)l - particle.x - r) / particle.vX;
    double yCollide = particle.vY < 0 ? (particle.y - r) / (0 - particle.vY)
                                      : ((double)l - particle.y - r) / particle.vY;
    wallCollisionTimes[particle.i] = fmin(xCollide, yCollide);
}

// One thread per accepted particle-particle event: move both discs to the
// contact point, exchange normal velocity components (equal-mass elastic
// collision), then let each continue for the remainder of the step, clamping
// at walls.
__global__ void executeParticleCollision() {
    int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (particleIndex >= particleCollisionsCount) return;

    CollisionEvent* e = particleCollisions[particleIndex];
    Particle* first = e->first;
    Particle* second = e->second;
    double time = e->time;
    if (first->getIndex() >= second->getIndex()) return;

    // move them to the contact position first
    first->x += time * first->vX;
    first->y += time * first->vY;
    second->x += time * second->vX;
    second->y += time * second->vY;

    // unit normal along the line of centres, tangent perpendicular to it
    double normalX = first->x - second->x;
    double normalY = first->y - second->y;
    double normalMag = sqrt(pow(normalX, 2) + pow(normalY, 2));
    normalX = normalX / normalMag;
    normalY = normalY / normalMag;
    double tangentX = -normalY;
    double tangentY = normalX;

    // decompose velocities into normal/tangent components
    double vFirstNormal = normalX * first->vX + normalY * first->vY;
    double vFirstTangent = tangentX * first->vX + tangentY * first->vY;
    double vSecondNormal = normalX * second->vX + normalY * second->vY;
    double vSecondTangent = tangentX * second->vX + tangentY * second->vY;

    // equal masses: the collision simply swaps the normal components
    // (renamed from `temp` to avoid shadowing the global event buffer)
    double swapped = vFirstNormal;
    vFirstNormal = vSecondNormal;
    vSecondNormal = swapped;

    first->vX = vFirstNormal * normalX + vFirstTangent * tangentX;
    first->vY = vFirstNormal * normalY + vFirstTangent * tangentY;
    second->vX = vSecondNormal * normalX + vSecondTangent * tangentX;
    second->vY = vSecondNormal * normalY + vSecondTangent * tangentY;

    // eliminate negative zeros so printed output is stable
    if (first->vX == -0.0) first->vX = 0.0;
    if (first->vY == -0.0) first->vY = 0.0;
    if (second->vX == -0.0) second->vX = 0.0;
    if (second->vY == -0.0) second->vY = 0.0;

    // continue moving for the rest of the step; stop at a wall if one is hit
    double timeToMove;
    double xCollide = first->vX < 0 ? (first->x - r) / (0 - first->vX)
                                    : ((double)l - r - first->x) / first->vX;
    double yCollide = first->vY < 0 ? (first->y - r) / (0 - first->vY)
                                    : ((double)l - r - first->y) / first->vY;
    if (xCollide >= 1 - time && yCollide >= 1 - time) {
        timeToMove = 1 - time;
    } else {
        timeToMove = fmin(xCollide, yCollide);
    }
    first->x += timeToMove * first->vX;
    first->y += timeToMove * first->vY;

    xCollide = second->vX < 0 ? (second->x - r) / (0 - second->vX)
                              : ((double)l - r - second->x) / second->vX;
    yCollide = second->vY < 0 ? (second->y - r) / (0 - second->vY)
                              : ((double)l - r - second->y) / second->vY;
    if (xCollide >= 1 - time && yCollide >= 1 - time) {
        timeToMove = 1 - time;
    } else {
        timeToMove = fmin(xCollide, yCollide);
    }
    second->x += timeToMove * second->vX;
    second->y += timeToMove * second->vY;

    first->pColl++;
    second->pColl++;
}

// One thread per accepted wall event: move to the wall, reflect the relevant
// velocity component(s), then continue for the remainder of the step.
__global__ void executeWallCollision() {
    int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (particleIndex >= wallCollisionsCount) return;

    CollisionEvent* e = wallCollisions[particleIndex];
    Particle* first = e->first;

    // times to the x wall and y wall along the current velocity
    double xCollide = first->vX < 0 ? (first->x - r) / (0 - first->vX)
                                    : ((double)l - first->x - r) / first->vX;
    double yCollide = first->vY < 0 ? (first->y - r) / (0 - first->vY)
                                    : ((double)l - first->y - r) / first->vY;
    double earlierTime = fmin(xCollide, yCollide);
    double laterTime = fmax(xCollide, yCollide);

    first->x += earlierTime * first->vX;
    first->y += earlierTime * first->vY;

    // reverse direction depending on which collision happens first
    if (xCollide <= yCollide) first->vX = -first->vX;
    if (yCollide <= xCollide) first->vY = -first->vY;

    // corner hit: both components reversed, so let the particle run out the
    // whole remaining step instead of stopping at the second wall
    if (xCollide == yCollide) laterTime = 1;

    first->x += (fmin(1.0, laterTime) - earlierTime) * first->vX;
    first->y += (fmin(1.0, laterTime) - earlierTime) * first->vY;
    first->wColl++;
}

// One thread per no-collision event: plain drift for the whole step.
__global__ void executeNoCollision() {
    int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (particleIndex >= noCollisionsCount) return;

    CollisionEvent* e = noCollisions[particleIndex];
    Particle* first = e->first;
    first->x += first->vX;
    first->y += first->vY;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
extern "C"
#define ITERATIONS 10000
__global__ void exec(int iterations, int size,
                     float* inputR, float* inputI, // Real/Imaginary input
                     int* output                   // Output image in one dimension
                     ) {
    // Buddhabrot pass: one thread per complex sample c = (cR, cI).
    // Caller must launch exactly one thread per input entry — there is no
    // element count to bounds-check against.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float cR = inputR[i];
    float cI = inputI[i];

    // Cardioid / period-2-bulb test: such points never escape, skip early.
    // Float literals (0.25f etc.) keep the arithmetic in single precision;
    // the original 1.0/4.0 constants silently promoted everything to double.
    float q = ((cR - 0.25f) * (cR - 0.25f)) + (cI * cI);
    if (q * (q + (cR - 0.25f)) < 0.25f * (cI * cI) ||
        (cR + 1.0f) * (cR + 1.0f) + (cI * cI) < 0.0625f)
        return;

    float x = 0.0f;
    float y = 0.0f;
    // Per-thread orbit history; 2 x 40 KB, lives in local (off-chip) memory.
    float outX[ITERATIONS];
    float outY[ITERATIONS];

    for (int j = 0; j < iterations; j++) {
        outX[j] = x;
        outY[j] = y;
        // z -> z^2 + c in components
        float xNew = (x * x) - (y * y) + cR;
        float yNew = (2.0f * x * y) + cI;
        if (xNew * xNew + yNew * yNew > 4.0f) {
            // Escaped: splat the orbit (skipping the seed point k == 0)
            // onto the histogram, mapping [-2, 2] onto [0, size).
            for (int k = 1; k < j; k++) {
                int curX = (int)((outX[k] + 2.0f) * size / 4);
                int curY = (int)((outY[k] + 2.0f) * size / 4);
                int idx = curX + size * curY;
                // BUGFIX: orbits from different threads can hit the same
                // pixel, so the original pair of plain `output[idx]++`
                // statements was a data race. A single atomic add of 2
                // preserves the intended per-visit weight.
                atomicAdd(&output[idx], 2);
            }
            return;
        }
        x = xNew;
        y = yNew;
    }
}
code for sm_80 Function : exec .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x13880, RZ ; /* 0xfffec78001017810 */ /* 0x000fe20007ffe0ff */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0060*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0070*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fc800078e0207 */ /*0080*/ IMAD.WIDE R2, R2, R7, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0207 */ /*0090*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000eaa000c1e1900 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1900 */ /*00b0*/ F2F.F64.F32 R6, R0 ; /* 0x0000000000067310 */ /* 0x004e220000201800 */ /*00c0*/ FMUL R10, R2, R2 ; /* 0x00000002020a7220 */ /* 0x008fce0000400000 */ /*00d0*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */ /* 0x000e620000201800 */ /*00e0*/ DADD R8, R6, -0.25 ; /* 0xbfd0000006087429 */ /* 0x001e4c0000000000 */ /*00f0*/ DFMA R12, R8, R8, R10 ; /* 0x00000008080c722b */ /* 0x002e08000000000a */ /*0100*/ DMUL R16, R10, 0.25 ; /* 0x3fd000000a107828 */ /* 0x000fcc0000000000 */ /*0110*/ F2F.F32.F64 R12, R12 ; /* 0x0000000c000c7310 */ /* 0x001e300000301000 */ /*0120*/ F2F.F64.F32 R14, R12 ; /* 0x0000000c000e7310 */ /* 0x001e240000201800 */ /*0130*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */ /* 0x001e0c000000000e */ /*0140*/ DMUL R8, R14, R8 ; /* 0x000000080e087228 */ /* 0x001e0c0000000000 */ /*0150*/ DSETP.GEU.AND P0, PT, R8, R16, PT ; /* 0x000000100800722a */ /* 
0x001e1c0003f0e000 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x001fea0003800000 */ /*0170*/ MOV R3, c[0x0][0x160] ; /* 0x0000580000037a02 */ /* 0x000fe20000000f00 */ /*0180*/ DADD R6, R6, 1 ; /* 0x3ff0000006067429 */ /* 0x000e060000000000 */ /*0190*/ ISETP.GE.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fc60003f06270 */ /*01a0*/ DFMA R6, R6, R6, R10 ; /* 0x000000060606722b */ /* 0x001e14000000000a */ /*01b0*/ DSETP.LT.OR P0, PT, R6, 0.0625, !P0 ; /* 0x3fb000000600742a */ /* 0x001e1c0004701400 */ /*01c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x001fea0003800000 */ /*01d0*/ BSSY B0, 0x340 ; /* 0x0000016000007945 */ /* 0x000fe20003800000 */ /*01e0*/ CS2R R4, SRZ ; /* 0x0000000000047805 */ /* 0x000fe2000001ff00 */ /*01f0*/ MOV R6, RZ ; /* 0x000000ff00067202 */ /* 0x000fe40000000f00 */ /*0200*/ IADD3 R3, R1, 0x9c40, RZ ; /* 0x00009c4001037810 */ /* 0x000fc60007ffe0ff */ /*0210*/ FMUL R9, R5, R5 ; /* 0x0000000505097220 */ /* 0x000fe20000400000 */ /*0220*/ LEA R7, R4, R1, 0x2 ; /* 0x0000000104077211 */ /* 0x000fe200078e10ff */ /*0230*/ FADD R8, R6.reuse, R6 ; /* 0x0000000606087221 */ /* 0x040fe40000000000 */ /*0240*/ FFMA R9, R6, R6, -R9 ; /* 0x0000000606097223 */ /* 0x000fe40000000809 */ /*0250*/ FFMA R8, R8, R5, R2 ; /* 0x0000000508087223 */ /* 0x000fe20000000002 */ /*0260*/ STL [R7], R6 ; /* 0x0000000607007387 */ /* 0x0001e20000100800 */ /*0270*/ FADD R10, R0, R9 ; /* 0x00000009000a7221 */ /* 0x000fc60000000000 */ /*0280*/ STL [R7+0x9c40], R5 ; /* 0x009c400507007387 */ /* 0x0001e20000100800 */ /*0290*/ FMUL R9, R10, R10 ; /* 0x0000000a0a097220 */ /* 0x000fc80000400000 */ /*02a0*/ FFMA R9, R8, R8, R9 ; /* 0x0000000808097223 */ /* 0x000fca0000000009 */ /*02b0*/ FSETP.GT.AND P0, PT, R9, 4, PT ; /* 0x408000000900780b */ /* 0x000fda0003f04000 */ /*02c0*/ @P0 BRA 0x330 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*02d0*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */ /* 0x001fe40007ffe0ff */ /*02e0*/ MOV R5, R8 ; /* 0x0000000800057202 */ /* 
0x000fe40000000f00 */ /*02f0*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */ /* 0x000fe40003f06270 */ /*0300*/ MOV R6, R10 ; /* 0x0000000a00067202 */ /* 0x000fd60000000f00 */ /*0310*/ @!P0 BRA 0x210 ; /* 0xfffffef000008947 */ /* 0x000fea000383ffff */ /*0320*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0330*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*0340*/ ISETP.GE.U32.AND P0, PT, R4, 0x2, PT ; /* 0x000000020400780c */ /* 0x000fda0003f06070 */ /*0350*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0360*/ IADD3 R11, R4.reuse, -0x1, RZ ; /* 0xffffffff040b7810 */ /* 0x040fe20007ffe0ff */ /*0370*/ I2F R6, c[0x0][0x164] ; /* 0x0000590000067b06 */ /* 0x000e220000201400 */ /*0380*/ IADD3 R4, R4, -0x2, RZ ; /* 0xfffffffe04047810 */ /* 0x000fe20007ffe0ff */ /*0390*/ BSSY B0, 0x7d0 ; /* 0x0000043000007945 */ /* 0x000fe20003800000 */ /*03a0*/ LOP3.LUT R2, R11, 0x3, RZ, 0xc0, !PT ; /* 0x000000030b027812 */ /* 0x000fe200078ec0ff */ /*03b0*/ HFMA2.MMA R10, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff0a7435 */ /* 0x000fe200000001ff */ /*03c0*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f26070 */ /*03d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fd60003f05270 */ /*03e0*/ @!P1 BRA 0x7c0 ; /* 0x000003d000009947 */ /* 0x000fea0003800000 */ /*03f0*/ IADD3 R11, R11, -R2, RZ ; /* 0x800000020b0b7210 */ /* 0x001fe40007ffe0ff */ /*0400*/ MOV R10, 0x1 ; /* 0x00000001000a7802 */ /* 0x000fe40000000f00 */ /*0410*/ MOV R7, R3 ; /* 0x0000000300077202 */ /* 0x000fe40000000f00 */ /*0420*/ MOV R0, R1 ; /* 0x0000000100007202 */ /* 0x000fca0000000f00 */ /*0430*/ LDL R4, [R0+0x4] ; /* 0x0000040000047983 */ /* 0x001ea80000100800 */ /*0440*/ LDL R8, [R7+0x4] ; /* 0x0000040007087983 */ /* 0x000ee20000100800 */ /*0450*/ MOV R16, 0x4 ; /* 0x0000000400107802 */ /* 0x000fc60000000f00 */ /*0460*/ LDL R20, [R7+0x10] ; /* 0x0000100007147983 */ /* 0x000f220000100800 
*/ /*0470*/ FADD R5, R4, 2 ; /* 0x4000000004057421 */ /* 0x004fe40000000000 */ /*0480*/ FADD R9, R8, 2 ; /* 0x4000000008097421 */ /* 0x008fe40000000000 */ /*0490*/ FMUL.D4 R14, R6.reuse, R5 ; /* 0x00000005060e7220 */ /* 0x040fe40000200000 */ /*04a0*/ FMUL.D4 R15, R6, R9 ; /* 0x00000009060f7220 */ /* 0x000fe20000200000 */ /*04b0*/ LDL.64 R4, [R0+0x8] ; /* 0x0000080000047983 */ /* 0x000ea60000100a00 */ /*04c0*/ F2I.TRUNC.NTZ R14, R14 ; /* 0x0000000e000e7305 */ /* 0x000fe2000020f100 */ /*04d0*/ LDL.64 R8, [R7+0x8] ; /* 0x0000080007087983 */ /* 0x000eee0000100a00 */ /*04e0*/ F2I.TRUNC.NTZ R15, R15 ; /* 0x0000000f000f7305 */ /* 0x000e24000020f100 */ /*04f0*/ IMAD R13, R15, c[0x0][0x164], R14 ; /* 0x000059000f0d7a24 */ /* 0x001fc800078e020e */ /*0500*/ IMAD.WIDE R12, R13, R16, c[0x0][0x178] ; /* 0x00005e000d0c7625 */ /* 0x000fca00078e0210 */ /*0510*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */ /* 0x000f62000c1e1900 */ /*0520*/ FADD R19, R4, 2 ; /* 0x4000000004137421 */ /* 0x004fe40000000000 */ /*0530*/ FADD R21, R8, 2 ; /* 0x4000000008157421 */ /* 0x008fe40000000000 */ /*0540*/ FMUL.D4 R19, R6.reuse, R19 ; /* 0x0000001306137220 */ /* 0x040fe40000200000 */ /*0550*/ FMUL.D4 R21, R6, R21 ; /* 0x0000001506157220 */ /* 0x000fc80000200000 */ /*0560*/ F2I.TRUNC.NTZ R19, R19 ; /* 0x0000001300137305 */ /* 0x000ff0000020f100 */ /*0570*/ F2I.TRUNC.NTZ R4, R21 ; /* 0x0000001500047305 */ /* 0x000e24000020f100 */ /*0580*/ IMAD R15, R4, c[0x0][0x164], R19 ; /* 0x00005900040f7a24 */ /* 0x001fe200078e0213 */ /*0590*/ IADD3 R17, R17, 0x2, RZ ; /* 0x0000000211117810 */ /* 0x020fe20007ffe0ff */ /*05a0*/ FADD R5, R5, 2 ; /* 0x4000000005057421 */ /* 0x000fe20000000000 */ /*05b0*/ LDL R19, [R0+0x10] ; /* 0x0000100000137983 */ /* 0x000ea20000100800 */ /*05c0*/ IMAD.WIDE R14, R15, R16, c[0x0][0x178] ; /* 0x00005e000f0e7625 */ /* 0x000fc600078e0210 */ /*05d0*/ STG.E [R12.64], R17 ; /* 0x000000110c007986 */ /* 0x0001e8000c101904 */ /*05e0*/ LDG.E R4, [R14.64] ; /* 0x000000040e047981 */ /* 
0x000ee2000c1e1900 */ /*05f0*/ FADD R9, R9, 2 ; /* 0x4000000009097421 */ /* 0x000fe40000000000 */ /*0600*/ FMUL.D4 R8, R6.reuse, R5 ; /* 0x0000000506087220 */ /* 0x040fe40000200000 */ /*0610*/ FMUL.D4 R18, R6, R9 ; /* 0x0000000906127220 */ /* 0x000fc80000200000 */ /*0620*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000ff0000020f100 */ /*0630*/ F2I.TRUNC.NTZ R5, R18 ; /* 0x0000001200057305 */ /* 0x000e64000020f100 */ /*0640*/ IMAD R5, R5, c[0x0][0x164], R8 ; /* 0x0000590005057a24 */ /* 0x002fe200078e0208 */ /*0650*/ IADD3 R9, R4, 0x2, RZ ; /* 0x0000000204097810 */ /* 0x008fc60007ffe0ff */ /*0660*/ IMAD.WIDE R4, R5, R16, c[0x0][0x178] ; /* 0x00005e0005047625 */ /* 0x000fe400078e0210 */ /*0670*/ STG.E [R14.64], R9 ; /* 0x000000090e007986 */ /* 0x0003e8000c101904 */ /*0680*/ LDG.E R12, [R4.64] ; /* 0x00000004040c7981 */ /* 0x001ee2000c1e1900 */ /*0690*/ FADD R19, R19, 2 ; /* 0x4000000013137421 */ /* 0x004fe40000000000 */ /*06a0*/ FADD R13, R20, 2 ; /* 0x40000000140d7421 */ /* 0x010fe40000000000 */ /*06b0*/ FMUL.D4 R19, R6, R19 ; /* 0x0000001306137220 */ /* 0x000fc40000200000 */ /*06c0*/ FMUL.D4 R8, R6, R13 ; /* 0x0000000d06087220 */ /* 0x000fc80000200000 */ /*06d0*/ F2I.TRUNC.NTZ R19, R19 ; /* 0x0000001300137305 */ /* 0x000ff0000020f100 */ /*06e0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e24000020f100 */ /*06f0*/ IMAD R17, R8, c[0x0][0x164], R19 ; /* 0x0000590008117a24 */ /* 0x001fc800078e0213 */ /*0700*/ IMAD.WIDE R16, R17, R16, c[0x0][0x178] ; /* 0x00005e0011107625 */ /* 0x000fe200078e0210 */ /*0710*/ IADD3 R13, R12, 0x2, RZ ; /* 0x000000020c0d7810 */ /* 0x008fca0007ffe0ff */ /*0720*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0001e8000c101904 */ /*0730*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */ /* 0x002ea2000c1e1900 */ /*0740*/ IADD3 R11, R11, -0x4, RZ ; /* 0xfffffffc0b0b7810 */ /* 0x000fc80007ffe0ff */ /*0750*/ ISETP.NE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fe40003f25270 */ /*0760*/ IADD3 R0, R0, 0x10, RZ 
; /* 0x0000001000007810 */ /* 0x000fe40007ffe0ff */ /*0770*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fe40007ffe0ff */ /*0780*/ IADD3 R10, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fe40007ffe0ff */ /*0790*/ IADD3 R9, R9, 0x2, RZ ; /* 0x0000000209097810 */ /* 0x004fca0007ffe0ff */ /*07a0*/ STG.E [R16.64], R9 ; /* 0x0000000910007986 */ /* 0x0001e2000c101904 */ /*07b0*/ @P1 BRA 0x430 ; /* 0xfffffc7000001947 */ /* 0x000fea000383ffff */ /*07c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*07d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*07e0*/ LEA R8, R10.reuse, R3, 0x2 ; /* 0x000000030a087211 */ /* 0x040fe200078e10ff */ /*07f0*/ IMAD R10, R10, 0x4, R1 ; /* 0x000000040a0a7824 */ /* 0x000fca00078e0201 */ /*0800*/ LDL R0, [R10] ; /* 0x000000000a007983 */ /* 0x000ea80000100800 */ /*0810*/ LDL R4, [R8] ; /* 0x0000000008047983 */ /* 0x001ee20000100800 */ /*0820*/ FADD R3, R0, 2 ; /* 0x4000000000037421 */ /* 0x004fe40000000000 */ /*0830*/ FADD R5, R4, 2 ; /* 0x4000000004057421 */ /* 0x008fe40000000000 */ /*0840*/ FMUL.D4 R3, R6, R3 ; /* 0x0000000306037220 */ /* 0x000fc40000200000 */ /*0850*/ FMUL.D4 R0, R6, R5 ; /* 0x0000000506007220 */ /* 0x000fe20000200000 */ /*0860*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc600000001ff */ /*0870*/ F2I.TRUNC.NTZ R3, R3 ; /* 0x0000000300037305 */ /* 0x000ff0000020f100 */ /*0880*/ F2I.TRUNC.NTZ R0, R0 ; /* 0x0000000000007305 */ /* 0x000e24000020f100 */ /*0890*/ IMAD R4, R0, c[0x0][0x164], R3 ; /* 0x0000590000047a24 */ /* 0x001fc800078e0203 */ /*08a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x178] ; /* 0x00005e0004047625 */ /* 0x000fca00078e0205 */ /*08b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea2000c1e1900 */ /*08c0*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe40007ffe0ff */ /*08d0*/ IADD3 R10, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fe40007ffe0ff */ /*08e0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 
0x000000ff0200720c */ /* 0x000fe40003f05270 */ /*08f0*/ IADD3 R8, R8, 0x4, RZ ; /* 0x0000000408087810 */ /* 0x000fe40007ffe0ff */ /*0900*/ IADD3 R7, R7, 0x2, RZ ; /* 0x0000000207077810 */ /* 0x004fca0007ffe0ff */ /*0910*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001e8000c101904 */ /*0920*/ @P0 BRA 0x800 ; /* 0xfffffed000000947 */ /* 0x000fea000383ffff */ /*0930*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0940*/ BRA 0x940; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0980*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0990*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
extern "C"
#define ITERATIONS 10000
__global__ void exec(int iterations, int size,
                     float* inputR, float* inputI, // Real/Imaginary input
                     int* output                   // Output image in one dimension
                     ) {
    // Buddhabrot pass: one thread per complex sample c = (cR, cI).
    // Caller must launch exactly one thread per input entry — there is no
    // element count to bounds-check against.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float cR = inputR[i];
    float cI = inputI[i];

    // Cardioid / period-2-bulb test: such points never escape, skip early.
    // Float literals (0.25f etc.) keep the arithmetic in single precision;
    // the original 1.0/4.0 constants silently promoted everything to double.
    float q = ((cR - 0.25f) * (cR - 0.25f)) + (cI * cI);
    if (q * (q + (cR - 0.25f)) < 0.25f * (cI * cI) ||
        (cR + 1.0f) * (cR + 1.0f) + (cI * cI) < 0.0625f)
        return;

    float x = 0.0f;
    float y = 0.0f;
    // Per-thread orbit history; 2 x 40 KB, lives in local (off-chip) memory.
    float outX[ITERATIONS];
    float outY[ITERATIONS];

    for (int j = 0; j < iterations; j++) {
        outX[j] = x;
        outY[j] = y;
        // z -> z^2 + c in components
        float xNew = (x * x) - (y * y) + cR;
        float yNew = (2.0f * x * y) + cI;
        if (xNew * xNew + yNew * yNew > 4.0f) {
            // Escaped: splat the orbit (skipping the seed point k == 0)
            // onto the histogram, mapping [-2, 2] onto [0, size).
            for (int k = 1; k < j; k++) {
                int curX = (int)((outX[k] + 2.0f) * size / 4);
                int curY = (int)((outY[k] + 2.0f) * size / 4);
                int idx = curX + size * curY;
                // BUGFIX: orbits from different threads can hit the same
                // pixel, so the original pair of plain `output[idx]++`
                // statements was a data race. A single atomic add of 2
                // preserves the intended per-visit weight.
                atomicAdd(&output[idx], 2);
            }
            return;
        }
        x = xNew;
        y = yNew;
    }
}
.file "tmpxft_000df471_00000000-6_Buddha2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi .type _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi, @function _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movl %esi, 24(%rsp) movq %rdx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq exec(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi, .-_Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi .globl exec .type exec, @function exec: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size exec, .-exec .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "exec" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq exec(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
extern "C"
#define ITERATIONS 10000
__global__ void exec(int iterations, int size,
                     float* inputR, float* inputI, // Real/Imaginary input
                     int* output                   // Output image in one dimension
                     ) {
    // Buddhabrot pass: one thread per complex sample c = (cR, cI).
    // Caller must launch exactly one thread per input entry — there is no
    // element count to bounds-check against.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float cR = inputR[i];
    float cI = inputI[i];

    // Cardioid / period-2-bulb test: such points never escape, skip early.
    // Float literals (0.25f etc.) keep the arithmetic in single precision;
    // the original 1.0/4.0 constants silently promoted everything to double.
    float q = ((cR - 0.25f) * (cR - 0.25f)) + (cI * cI);
    if (q * (q + (cR - 0.25f)) < 0.25f * (cI * cI) ||
        (cR + 1.0f) * (cR + 1.0f) + (cI * cI) < 0.0625f)
        return;

    float x = 0.0f;
    float y = 0.0f;
    // Per-thread orbit history; 2 x 40 KB, lives in local (off-chip) memory.
    float outX[ITERATIONS];
    float outY[ITERATIONS];

    for (int j = 0; j < iterations; j++) {
        outX[j] = x;
        outY[j] = y;
        // z -> z^2 + c in components
        float xNew = (x * x) - (y * y) + cR;
        float yNew = (2.0f * x * y) + cI;
        if (xNew * xNew + yNew * yNew > 4.0f) {
            // Escaped: splat the orbit (skipping the seed point k == 0)
            // onto the histogram, mapping [-2, 2] onto [0, size).
            for (int k = 1; k < j; k++) {
                int curX = (int)((outX[k] + 2.0f) * size / 4);
                int curY = (int)((outY[k] + 2.0f) * size / 4);
                int idx = curX + size * curY;
                // BUGFIX: orbits from different threads can hit the same
                // pixel, so the original pair of plain `output[idx]++`
                // statements was a data race. A single atomic add of 2
                // preserves the intended per-visit weight.
                atomicAdd(&output[idx], 2);
            }
            return;
        }
        x = xNew;
        y = yNew;
    }
}
#include <hip/hip_runtime.h> extern "C" #define ITERATIONS 10000 __global__ void exec(int iterations, int size, float* inputR, float* inputI, // Real/Imaginary input int* output // Output image in one dimension ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float cR = inputR[i]; float cI = inputI[i]; float q = ((cR - (1.0 / 4.0)) * (cR - (1.0 / 4.0))) + (cI * cI); if (q * (q + (cR - (1.0 / 4.0))) < (1.0 / 4.0) * (cI * cI) || (cR + 1.0) * (cR + 1.0) + (cI * cI) < (1.0 / 16.0)) return; float x = 0; float y = 0; float outX[ITERATIONS]; float outY[ITERATIONS]; for (int j = 0; j < iterations; j++) { outX[j] = x; outY[j] = y; float xNew = (x * x) - (y * y) + cR; float yNew = (2 * x * y) + cI; if (xNew * xNew + yNew * yNew > 4) { for (int k = 1; k < j; k++) { int curX = (outX[k] + 2 ) * size / 4; int curY = (outY[k] + 2 ) * size / 4; int idx = curX + size * curY; output[idx]++; output[idx]++; } return; } x = xNew; y = yNew; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> extern "C" #define ITERATIONS 10000 __global__ void exec(int iterations, int size, float* inputR, float* inputI, // Real/Imaginary input int* output // Output image in one dimension ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float cR = inputR[i]; float cI = inputI[i]; float q = ((cR - (1.0 / 4.0)) * (cR - (1.0 / 4.0))) + (cI * cI); if (q * (q + (cR - (1.0 / 4.0))) < (1.0 / 4.0) * (cI * cI) || (cR + 1.0) * (cR + 1.0) + (cI * cI) < (1.0 / 16.0)) return; float x = 0; float y = 0; float outX[ITERATIONS]; float outY[ITERATIONS]; for (int j = 0; j < iterations; j++) { outX[j] = x; outY[j] = y; float xNew = (x * x) - (y * y) + cR; float yNew = (2 * x * y) + cI; if (xNew * xNew + yNew * yNew > 4) { for (int k = 1; k < j; k++) { int curX = (outX[k] + 2 ) * size / 4; int curY = (outY[k] + 2 ) * size / 4; int idx = curX + size * curY; output[idx]++; output[idx]++; } return; } x = xNew; y = yNew; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected exec .globl exec .p2align 8 .type exec,@function exec: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo global_load_b32 v4, v[2:3], off global_load_b32 v5, v[0:1], off s_waitcnt vmcnt(1) v_cvt_f64_f32_e32 v[0:1], v4 s_waitcnt vmcnt(0) v_mul_f32_e32 v2, v5, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cvt_f64_f32_e32 v[2:3], v2 v_add_f64 v[6:7], v[0:1], 0xbfd00000 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[8:9], v[6:7], v[6:7], v[2:3] v_cvt_f32_f64_e32 v8, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_f64_f32_e32 v[8:9], v8 v_add_f64 v[6:7], v[6:7], v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mul_f64 v[6:7], v[6:7], v[8:9] v_ldexp_f64 v[8:9], v[2:3], -2 v_cmpx_nlt_f64_e32 v[6:7], v[8:9] s_cbranch_execz .LBB0_10 v_add_f64 v[0:1], v[0:1], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[0:1], v[0:1], v[0:1], v[2:3] v_cmp_ngt_f64_e32 vcc_lo, 0x3fb00000, v[0:1] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_10 s_load_b32 s4, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s4, 1 s_cbranch_scc1 .LBB0_10 s_clause 0x1 s_load_b32 s5, s[0:1], 0x4 s_load_b64 s[2:3], s[0:1], 0x18 v_or_b32_e64 v0, 16, 4 v_or_b32_e64 v1, 0x9c50, 4 v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v6, 0 s_mov_b32 s1, 0 s_mov_b32 s6, 0 
s_waitcnt lgkmcnt(0) v_cvt_f32_i32_e32 v2, s5 s_branch .LBB0_6 .LBB0_4: s_set_inst_prefetch_distance 0x2 v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v8, v3 .LBB0_5: s_or_b32 exec_lo, exec_lo, s7 s_add_i32 s6, s6, 1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_dual_mov_b32 v3, v8 :: v_dual_mov_b32 v6, v7 s_cmp_eq_u32 s6, s4 s_cselect_b32 s0, -1, 0 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, exec_lo, s0 s_or_b32 s1, s0, s1 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execz .LBB0_10 .LBB0_6: v_dual_mul_f32 v7, v6, v6 :: v_dual_add_f32 v8, v3, v3 s_lshl_b32 s0, s6, 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_add_i32 s7, s0, 16 v_fma_f32 v9, v3, v3, -v7 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_fma_f32 v7, v6, v8, v5 s_add_i32 s0, s0, 0x9c50 scratch_store_b32 off, v3, s7 scratch_store_b32 off, v6, s0 v_dual_add_f32 v8, v4, v9 :: v_dual_mul_f32 v9, v7, v7 v_fmac_f32_e32 v9, v8, v8 s_delay_alu instid0(VALU_DEP_1) v_cmp_lt_f32_e32 vcc_lo, 4.0, v9 s_and_saveexec_b32 s7, vcc_lo s_cbranch_execz .LBB0_5 s_cmp_lt_u32 s6, 2 s_cbranch_scc1 .LBB0_4 v_dual_mov_b32 v7, v1 :: v_dual_mov_b32 v8, v0 s_mov_b32 s8, 1 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_9: scratch_load_b32 v9, v8, off scratch_load_b32 v10, v7, off v_add_nc_u32_e32 v7, 4, v7 s_add_i32 s8, s8, 1 v_add_nc_u32_e32 v8, 4, v8 s_cmp_lt_u32 s8, s6 s_waitcnt vmcnt(0) v_dual_add_f32 v9, 2.0, v9 :: v_dual_add_f32 v10, 2.0, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v9, v9, v2 v_dual_mul_f32 v10, v10, v2 :: v_dual_mul_f32 v9, 0x3e800000, v9 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f32_e32 v10, 0x3e800000, v10 v_cvt_i32_f32_e32 v9, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_i32_f32_e32 v12, v10 v_mad_u64_u32 
v[10:11], null, v12, s5, v[9:10] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v11, 31, v10 v_lshlrev_b64 v[9:10], 2, v[10:11] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v9, s0, s2, v9 v_add_co_ci_u32_e64 v10, s0, s3, v10, s0 global_load_b32 v11, v[9:10], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v11, 2, v11 global_store_b32 v[9:10], v11, off s_cbranch_scc1 .LBB0_9 s_branch .LBB0_4 .LBB0_10: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel exec .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 80016 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 1 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size exec, .Lfunc_end0-exec .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size 
__hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .offset: 4 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: exec .private_segment_fixed_size: 80016 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: exec.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 13 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> extern "C" #define ITERATIONS 10000 __global__ void exec(int iterations, int size, float* inputR, float* inputI, // Real/Imaginary input int* output // Output image in one dimension ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float cR = inputR[i]; float cI = inputI[i]; float q = ((cR - (1.0 / 4.0)) * (cR - (1.0 / 4.0))) + (cI * cI); if (q * (q + (cR - (1.0 / 4.0))) < (1.0 / 4.0) * (cI * cI) || (cR + 1.0) * (cR + 1.0) + (cI * cI) < (1.0 / 16.0)) return; float x = 0; float y = 0; float outX[ITERATIONS]; float outY[ITERATIONS]; for (int j = 0; j < iterations; j++) { outX[j] = x; outY[j] = y; float xNew = (x * x) - (y * y) + cR; float yNew = (2 * x * y) + cI; if (xNew * xNew + yNew * yNew > 4) { for (int k = 1; k < j; k++) { int curX = (outX[k] + 2 ) * size / 4; int curY = (outY[k] + 2 ) * size / 4; int idx = curX + size * curY; output[idx]++; output[idx]++; } return; } x = xNew; y = yNew; } }
.text .file "Buddha2.hip" .globl __device_stub__exec # -- Begin function __device_stub__exec .p2align 4, 0x90 .type __device_stub__exec,@function __device_stub__exec: # @__device_stub__exec .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movl %esi, (%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) leaq 72(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $exec, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size __device_stub__exec, .Lfunc_end0-__device_stub__exec .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $exec, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, 
%rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type exec,@object # @exec .section .rodata,"a",@progbits .globl exec .p2align 3, 0x0 exec: .quad __device_stub__exec .size exec, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "exec" .size .L__unnamed_1, 5 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__exec .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym exec .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : exec .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x13880, RZ ; /* 0xfffec78001017810 */ /* 0x000fe20007ffe0ff */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0060*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0070*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fc800078e0207 */ /*0080*/ IMAD.WIDE R2, R2, R7, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0207 */ /*0090*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000eaa000c1e1900 */ /*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1900 */ /*00b0*/ F2F.F64.F32 R6, R0 ; /* 0x0000000000067310 */ /* 0x004e220000201800 */ /*00c0*/ FMUL R10, R2, R2 ; /* 0x00000002020a7220 */ /* 0x008fce0000400000 */ /*00d0*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */ /* 0x000e620000201800 */ /*00e0*/ DADD R8, R6, -0.25 ; /* 0xbfd0000006087429 */ /* 0x001e4c0000000000 */ /*00f0*/ DFMA R12, R8, R8, R10 ; /* 0x00000008080c722b */ /* 0x002e08000000000a */ /*0100*/ DMUL R16, R10, 0.25 ; /* 0x3fd000000a107828 */ /* 0x000fcc0000000000 */ /*0110*/ F2F.F32.F64 R12, R12 ; /* 0x0000000c000c7310 */ /* 0x001e300000301000 */ /*0120*/ F2F.F64.F32 R14, R12 ; /* 0x0000000c000e7310 */ /* 0x001e240000201800 */ /*0130*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */ /* 0x001e0c000000000e */ /*0140*/ DMUL R8, R14, R8 ; /* 0x000000080e087228 */ /* 0x001e0c0000000000 */ /*0150*/ DSETP.GEU.AND P0, PT, R8, R16, PT ; /* 0x000000100800722a */ /* 
0x001e1c0003f0e000 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x001fea0003800000 */ /*0170*/ MOV R3, c[0x0][0x160] ; /* 0x0000580000037a02 */ /* 0x000fe20000000f00 */ /*0180*/ DADD R6, R6, 1 ; /* 0x3ff0000006067429 */ /* 0x000e060000000000 */ /*0190*/ ISETP.GE.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fc60003f06270 */ /*01a0*/ DFMA R6, R6, R6, R10 ; /* 0x000000060606722b */ /* 0x001e14000000000a */ /*01b0*/ DSETP.LT.OR P0, PT, R6, 0.0625, !P0 ; /* 0x3fb000000600742a */ /* 0x001e1c0004701400 */ /*01c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x001fea0003800000 */ /*01d0*/ BSSY B0, 0x340 ; /* 0x0000016000007945 */ /* 0x000fe20003800000 */ /*01e0*/ CS2R R4, SRZ ; /* 0x0000000000047805 */ /* 0x000fe2000001ff00 */ /*01f0*/ MOV R6, RZ ; /* 0x000000ff00067202 */ /* 0x000fe40000000f00 */ /*0200*/ IADD3 R3, R1, 0x9c40, RZ ; /* 0x00009c4001037810 */ /* 0x000fc60007ffe0ff */ /*0210*/ FMUL R9, R5, R5 ; /* 0x0000000505097220 */ /* 0x000fe20000400000 */ /*0220*/ LEA R7, R4, R1, 0x2 ; /* 0x0000000104077211 */ /* 0x000fe200078e10ff */ /*0230*/ FADD R8, R6.reuse, R6 ; /* 0x0000000606087221 */ /* 0x040fe40000000000 */ /*0240*/ FFMA R9, R6, R6, -R9 ; /* 0x0000000606097223 */ /* 0x000fe40000000809 */ /*0250*/ FFMA R8, R8, R5, R2 ; /* 0x0000000508087223 */ /* 0x000fe20000000002 */ /*0260*/ STL [R7], R6 ; /* 0x0000000607007387 */ /* 0x0001e20000100800 */ /*0270*/ FADD R10, R0, R9 ; /* 0x00000009000a7221 */ /* 0x000fc60000000000 */ /*0280*/ STL [R7+0x9c40], R5 ; /* 0x009c400507007387 */ /* 0x0001e20000100800 */ /*0290*/ FMUL R9, R10, R10 ; /* 0x0000000a0a097220 */ /* 0x000fc80000400000 */ /*02a0*/ FFMA R9, R8, R8, R9 ; /* 0x0000000808097223 */ /* 0x000fca0000000009 */ /*02b0*/ FSETP.GT.AND P0, PT, R9, 4, PT ; /* 0x408000000900780b */ /* 0x000fda0003f04000 */ /*02c0*/ @P0 BRA 0x330 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*02d0*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */ /* 0x001fe40007ffe0ff */ /*02e0*/ MOV R5, R8 ; /* 0x0000000800057202 */ /* 
0x000fe40000000f00 */ /*02f0*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */ /* 0x000fe40003f06270 */ /*0300*/ MOV R6, R10 ; /* 0x0000000a00067202 */ /* 0x000fd60000000f00 */ /*0310*/ @!P0 BRA 0x210 ; /* 0xfffffef000008947 */ /* 0x000fea000383ffff */ /*0320*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0330*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*0340*/ ISETP.GE.U32.AND P0, PT, R4, 0x2, PT ; /* 0x000000020400780c */ /* 0x000fda0003f06070 */ /*0350*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0360*/ IADD3 R11, R4.reuse, -0x1, RZ ; /* 0xffffffff040b7810 */ /* 0x040fe20007ffe0ff */ /*0370*/ I2F R6, c[0x0][0x164] ; /* 0x0000590000067b06 */ /* 0x000e220000201400 */ /*0380*/ IADD3 R4, R4, -0x2, RZ ; /* 0xfffffffe04047810 */ /* 0x000fe20007ffe0ff */ /*0390*/ BSSY B0, 0x7d0 ; /* 0x0000043000007945 */ /* 0x000fe20003800000 */ /*03a0*/ LOP3.LUT R2, R11, 0x3, RZ, 0xc0, !PT ; /* 0x000000030b027812 */ /* 0x000fe200078ec0ff */ /*03b0*/ HFMA2.MMA R10, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff0a7435 */ /* 0x000fe200000001ff */ /*03c0*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f26070 */ /*03d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fd60003f05270 */ /*03e0*/ @!P1 BRA 0x7c0 ; /* 0x000003d000009947 */ /* 0x000fea0003800000 */ /*03f0*/ IADD3 R11, R11, -R2, RZ ; /* 0x800000020b0b7210 */ /* 0x001fe40007ffe0ff */ /*0400*/ MOV R10, 0x1 ; /* 0x00000001000a7802 */ /* 0x000fe40000000f00 */ /*0410*/ MOV R7, R3 ; /* 0x0000000300077202 */ /* 0x000fe40000000f00 */ /*0420*/ MOV R0, R1 ; /* 0x0000000100007202 */ /* 0x000fca0000000f00 */ /*0430*/ LDL R4, [R0+0x4] ; /* 0x0000040000047983 */ /* 0x001ea80000100800 */ /*0440*/ LDL R8, [R7+0x4] ; /* 0x0000040007087983 */ /* 0x000ee20000100800 */ /*0450*/ MOV R16, 0x4 ; /* 0x0000000400107802 */ /* 0x000fc60000000f00 */ /*0460*/ LDL R20, [R7+0x10] ; /* 0x0000100007147983 */ /* 0x000f220000100800 
*/ /*0470*/ FADD R5, R4, 2 ; /* 0x4000000004057421 */ /* 0x004fe40000000000 */ /*0480*/ FADD R9, R8, 2 ; /* 0x4000000008097421 */ /* 0x008fe40000000000 */ /*0490*/ FMUL.D4 R14, R6.reuse, R5 ; /* 0x00000005060e7220 */ /* 0x040fe40000200000 */ /*04a0*/ FMUL.D4 R15, R6, R9 ; /* 0x00000009060f7220 */ /* 0x000fe20000200000 */ /*04b0*/ LDL.64 R4, [R0+0x8] ; /* 0x0000080000047983 */ /* 0x000ea60000100a00 */ /*04c0*/ F2I.TRUNC.NTZ R14, R14 ; /* 0x0000000e000e7305 */ /* 0x000fe2000020f100 */ /*04d0*/ LDL.64 R8, [R7+0x8] ; /* 0x0000080007087983 */ /* 0x000eee0000100a00 */ /*04e0*/ F2I.TRUNC.NTZ R15, R15 ; /* 0x0000000f000f7305 */ /* 0x000e24000020f100 */ /*04f0*/ IMAD R13, R15, c[0x0][0x164], R14 ; /* 0x000059000f0d7a24 */ /* 0x001fc800078e020e */ /*0500*/ IMAD.WIDE R12, R13, R16, c[0x0][0x178] ; /* 0x00005e000d0c7625 */ /* 0x000fca00078e0210 */ /*0510*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */ /* 0x000f62000c1e1900 */ /*0520*/ FADD R19, R4, 2 ; /* 0x4000000004137421 */ /* 0x004fe40000000000 */ /*0530*/ FADD R21, R8, 2 ; /* 0x4000000008157421 */ /* 0x008fe40000000000 */ /*0540*/ FMUL.D4 R19, R6.reuse, R19 ; /* 0x0000001306137220 */ /* 0x040fe40000200000 */ /*0550*/ FMUL.D4 R21, R6, R21 ; /* 0x0000001506157220 */ /* 0x000fc80000200000 */ /*0560*/ F2I.TRUNC.NTZ R19, R19 ; /* 0x0000001300137305 */ /* 0x000ff0000020f100 */ /*0570*/ F2I.TRUNC.NTZ R4, R21 ; /* 0x0000001500047305 */ /* 0x000e24000020f100 */ /*0580*/ IMAD R15, R4, c[0x0][0x164], R19 ; /* 0x00005900040f7a24 */ /* 0x001fe200078e0213 */ /*0590*/ IADD3 R17, R17, 0x2, RZ ; /* 0x0000000211117810 */ /* 0x020fe20007ffe0ff */ /*05a0*/ FADD R5, R5, 2 ; /* 0x4000000005057421 */ /* 0x000fe20000000000 */ /*05b0*/ LDL R19, [R0+0x10] ; /* 0x0000100000137983 */ /* 0x000ea20000100800 */ /*05c0*/ IMAD.WIDE R14, R15, R16, c[0x0][0x178] ; /* 0x00005e000f0e7625 */ /* 0x000fc600078e0210 */ /*05d0*/ STG.E [R12.64], R17 ; /* 0x000000110c007986 */ /* 0x0001e8000c101904 */ /*05e0*/ LDG.E R4, [R14.64] ; /* 0x000000040e047981 */ /* 
0x000ee2000c1e1900 */ /*05f0*/ FADD R9, R9, 2 ; /* 0x4000000009097421 */ /* 0x000fe40000000000 */ /*0600*/ FMUL.D4 R8, R6.reuse, R5 ; /* 0x0000000506087220 */ /* 0x040fe40000200000 */ /*0610*/ FMUL.D4 R18, R6, R9 ; /* 0x0000000906127220 */ /* 0x000fc80000200000 */ /*0620*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000ff0000020f100 */ /*0630*/ F2I.TRUNC.NTZ R5, R18 ; /* 0x0000001200057305 */ /* 0x000e64000020f100 */ /*0640*/ IMAD R5, R5, c[0x0][0x164], R8 ; /* 0x0000590005057a24 */ /* 0x002fe200078e0208 */ /*0650*/ IADD3 R9, R4, 0x2, RZ ; /* 0x0000000204097810 */ /* 0x008fc60007ffe0ff */ /*0660*/ IMAD.WIDE R4, R5, R16, c[0x0][0x178] ; /* 0x00005e0005047625 */ /* 0x000fe400078e0210 */ /*0670*/ STG.E [R14.64], R9 ; /* 0x000000090e007986 */ /* 0x0003e8000c101904 */ /*0680*/ LDG.E R12, [R4.64] ; /* 0x00000004040c7981 */ /* 0x001ee2000c1e1900 */ /*0690*/ FADD R19, R19, 2 ; /* 0x4000000013137421 */ /* 0x004fe40000000000 */ /*06a0*/ FADD R13, R20, 2 ; /* 0x40000000140d7421 */ /* 0x010fe40000000000 */ /*06b0*/ FMUL.D4 R19, R6, R19 ; /* 0x0000001306137220 */ /* 0x000fc40000200000 */ /*06c0*/ FMUL.D4 R8, R6, R13 ; /* 0x0000000d06087220 */ /* 0x000fc80000200000 */ /*06d0*/ F2I.TRUNC.NTZ R19, R19 ; /* 0x0000001300137305 */ /* 0x000ff0000020f100 */ /*06e0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e24000020f100 */ /*06f0*/ IMAD R17, R8, c[0x0][0x164], R19 ; /* 0x0000590008117a24 */ /* 0x001fc800078e0213 */ /*0700*/ IMAD.WIDE R16, R17, R16, c[0x0][0x178] ; /* 0x00005e0011107625 */ /* 0x000fe200078e0210 */ /*0710*/ IADD3 R13, R12, 0x2, RZ ; /* 0x000000020c0d7810 */ /* 0x008fca0007ffe0ff */ /*0720*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0001e8000c101904 */ /*0730*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */ /* 0x002ea2000c1e1900 */ /*0740*/ IADD3 R11, R11, -0x4, RZ ; /* 0xfffffffc0b0b7810 */ /* 0x000fc80007ffe0ff */ /*0750*/ ISETP.NE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fe40003f25270 */ /*0760*/ IADD3 R0, R0, 0x10, RZ 
; /* 0x0000001000007810 */ /* 0x000fe40007ffe0ff */ /*0770*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */ /* 0x000fe40007ffe0ff */ /*0780*/ IADD3 R10, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fe40007ffe0ff */ /*0790*/ IADD3 R9, R9, 0x2, RZ ; /* 0x0000000209097810 */ /* 0x004fca0007ffe0ff */ /*07a0*/ STG.E [R16.64], R9 ; /* 0x0000000910007986 */ /* 0x0001e2000c101904 */ /*07b0*/ @P1 BRA 0x430 ; /* 0xfffffc7000001947 */ /* 0x000fea000383ffff */ /*07c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*07d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*07e0*/ LEA R8, R10.reuse, R3, 0x2 ; /* 0x000000030a087211 */ /* 0x040fe200078e10ff */ /*07f0*/ IMAD R10, R10, 0x4, R1 ; /* 0x000000040a0a7824 */ /* 0x000fca00078e0201 */ /*0800*/ LDL R0, [R10] ; /* 0x000000000a007983 */ /* 0x000ea80000100800 */ /*0810*/ LDL R4, [R8] ; /* 0x0000000008047983 */ /* 0x001ee20000100800 */ /*0820*/ FADD R3, R0, 2 ; /* 0x4000000000037421 */ /* 0x004fe40000000000 */ /*0830*/ FADD R5, R4, 2 ; /* 0x4000000004057421 */ /* 0x008fe40000000000 */ /*0840*/ FMUL.D4 R3, R6, R3 ; /* 0x0000000306037220 */ /* 0x000fc40000200000 */ /*0850*/ FMUL.D4 R0, R6, R5 ; /* 0x0000000506007220 */ /* 0x000fe20000200000 */ /*0860*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc600000001ff */ /*0870*/ F2I.TRUNC.NTZ R3, R3 ; /* 0x0000000300037305 */ /* 0x000ff0000020f100 */ /*0880*/ F2I.TRUNC.NTZ R0, R0 ; /* 0x0000000000007305 */ /* 0x000e24000020f100 */ /*0890*/ IMAD R4, R0, c[0x0][0x164], R3 ; /* 0x0000590000047a24 */ /* 0x001fc800078e0203 */ /*08a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x178] ; /* 0x00005e0004047625 */ /* 0x000fca00078e0205 */ /*08b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea2000c1e1900 */ /*08c0*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe40007ffe0ff */ /*08d0*/ IADD3 R10, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fe40007ffe0ff */ /*08e0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 
0x000000ff0200720c */ /* 0x000fe40003f05270 */ /*08f0*/ IADD3 R8, R8, 0x4, RZ ; /* 0x0000000408087810 */ /* 0x000fe40007ffe0ff */ /*0900*/ IADD3 R7, R7, 0x2, RZ ; /* 0x0000000207077810 */ /* 0x004fca0007ffe0ff */ /*0910*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001e8000c101904 */ /*0920*/ @P0 BRA 0x800 ; /* 0xfffffed000000947 */ /* 0x000fea000383ffff */ /*0930*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0940*/ BRA 0x940; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0980*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0990*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected exec .globl exec .p2align 8 .type exec,@function exec: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo global_load_b32 v4, v[2:3], off global_load_b32 v5, v[0:1], off s_waitcnt vmcnt(1) v_cvt_f64_f32_e32 v[0:1], v4 s_waitcnt vmcnt(0) v_mul_f32_e32 v2, v5, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cvt_f64_f32_e32 v[2:3], v2 v_add_f64 v[6:7], v[0:1], 0xbfd00000 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[8:9], v[6:7], v[6:7], v[2:3] v_cvt_f32_f64_e32 v8, v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_f64_f32_e32 v[8:9], v8 v_add_f64 v[6:7], v[6:7], v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mul_f64 v[6:7], v[6:7], v[8:9] v_ldexp_f64 v[8:9], v[2:3], -2 v_cmpx_nlt_f64_e32 v[6:7], v[8:9] s_cbranch_execz .LBB0_10 v_add_f64 v[0:1], v[0:1], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[0:1], v[0:1], v[0:1], v[2:3] v_cmp_ngt_f64_e32 vcc_lo, 0x3fb00000, v[0:1] s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_10 s_load_b32 s4, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s4, 1 s_cbranch_scc1 .LBB0_10 s_clause 0x1 s_load_b32 s5, s[0:1], 0x4 s_load_b64 s[2:3], s[0:1], 0x18 v_or_b32_e64 v0, 16, 4 v_or_b32_e64 v1, 0x9c50, 4 v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v6, 0 s_mov_b32 s1, 0 s_mov_b32 s6, 0 
s_waitcnt lgkmcnt(0) v_cvt_f32_i32_e32 v2, s5 s_branch .LBB0_6 .LBB0_4: s_set_inst_prefetch_distance 0x2 v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v8, v3 .LBB0_5: s_or_b32 exec_lo, exec_lo, s7 s_add_i32 s6, s6, 1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_dual_mov_b32 v3, v8 :: v_dual_mov_b32 v6, v7 s_cmp_eq_u32 s6, s4 s_cselect_b32 s0, -1, 0 s_or_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, exec_lo, s0 s_or_b32 s1, s0, s1 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execz .LBB0_10 .LBB0_6: v_dual_mul_f32 v7, v6, v6 :: v_dual_add_f32 v8, v3, v3 s_lshl_b32 s0, s6, 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_add_i32 s7, s0, 16 v_fma_f32 v9, v3, v3, -v7 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_fma_f32 v7, v6, v8, v5 s_add_i32 s0, s0, 0x9c50 scratch_store_b32 off, v3, s7 scratch_store_b32 off, v6, s0 v_dual_add_f32 v8, v4, v9 :: v_dual_mul_f32 v9, v7, v7 v_fmac_f32_e32 v9, v8, v8 s_delay_alu instid0(VALU_DEP_1) v_cmp_lt_f32_e32 vcc_lo, 4.0, v9 s_and_saveexec_b32 s7, vcc_lo s_cbranch_execz .LBB0_5 s_cmp_lt_u32 s6, 2 s_cbranch_scc1 .LBB0_4 v_dual_mov_b32 v7, v1 :: v_dual_mov_b32 v8, v0 s_mov_b32 s8, 1 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_9: scratch_load_b32 v9, v8, off scratch_load_b32 v10, v7, off v_add_nc_u32_e32 v7, 4, v7 s_add_i32 s8, s8, 1 v_add_nc_u32_e32 v8, 4, v8 s_cmp_lt_u32 s8, s6 s_waitcnt vmcnt(0) v_dual_add_f32 v9, 2.0, v9 :: v_dual_add_f32 v10, 2.0, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v9, v9, v2 v_dual_mul_f32 v10, v10, v2 :: v_dual_mul_f32 v9, 0x3e800000, v9 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f32_e32 v10, 0x3e800000, v10 v_cvt_i32_f32_e32 v9, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_i32_f32_e32 v12, v10 v_mad_u64_u32 
v[10:11], null, v12, s5, v[9:10] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v11, 31, v10 v_lshlrev_b64 v[9:10], 2, v[10:11] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v9, s0, s2, v9 v_add_co_ci_u32_e64 v10, s0, s3, v10, s0 global_load_b32 v11, v[9:10], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v11, 2, v11 global_store_b32 v[9:10], v11, off s_cbranch_scc1 .LBB0_9 s_branch .LBB0_4 .LBB0_10: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel exec .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 80016 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 1 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size exec, .Lfunc_end0-exec .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size 
__hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .offset: 4 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: exec .private_segment_fixed_size: 80016 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: exec.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 13 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000df471_00000000-6_Buddha2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi .type _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi, @function _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movl %esi, 24(%rsp) movq %rdx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq exec(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi, .-_Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi .globl exec .type exec, @function exec: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z4execiiPfS_PiiiPfS_Pi addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size exec, .-exec .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "exec" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq exec(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Buddha2.hip" .globl __device_stub__exec # -- Begin function __device_stub__exec .p2align 4, 0x90 .type __device_stub__exec,@function __device_stub__exec: # @__device_stub__exec .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movl %esi, (%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) leaq 72(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $exec, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size __device_stub__exec, .Lfunc_end0-__device_stub__exec .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $exec, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, 
%rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type exec,@object # @exec .section .rodata,"a",@progbits .globl exec .p2align 3, 0x0 exec: .quad __device_stub__exec .size exec, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "exec" .size .L__unnamed_1, 5 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__exec .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym exec .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { cudaError_t err; int nDevices = 0; err = cudaGetDeviceCount(&nDevices); if(err != cudaSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { cudaError_t err; int nDevices = 0; err = cudaGetDeviceCount(&nDevices); if(err != cudaSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
.file "tmpxft_0015dd4c_00000000-6_static.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "nDevices: " .LC1: .string "err: " .LC2: .string "this library uses cuda code" .LC3: .string "you have " .LC4: .string " devices that support cuda" .text .globl _Z13file1_sq_funci .type _Z13file1_sq_funci, @function _Z13file1_sq_funci: .LFB3669: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movl %edi, %ebx movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movl $0, 4(%rsp) leaq 4(%rsp), %rdi call cudaGetDeviceCount@PLT testl %eax, %eax jne .L24 movl $27, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbp testq %rbp, %rbp je .L25 cmpb $0, 56(%rbp) je .L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $9, %edx leaq .LC3(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbp movl $26, %edx leaq .LC4(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq 0(%rbp), %rax movq -24(%rax), %rax movq 
240(%rbp,%rax), %r12 testq %r12, %r12 je .L26 cmpb $0, 56(%r12) je .L20 movzbl 67(%r12), %esi .L21: movsbl %sil, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl %ebx, %eax imull %ebx, %eax .L3: movq 8(%rsp), %rdx subq %fs:40, %rdx jne .L27 addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state movl %eax, %ebp movl $10, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r12 testq %r12, %r12 je .L28 cmpb $0, 56(%r12) je .L7 movzbl 67(%r12), %esi .L8: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $5, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebp, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L29 cmpb $0, 56(%rbp) je .L11 movzbl 67(%rbp), %esi .L12: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $1, %eax jmp .L3 .L28: movq 8(%rsp), %rax subq %fs:40, %rax jne .L30 call _ZSt16__throw_bad_castv@PLT .L30: call __stack_chk_fail@PLT .L7: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L8 .L29: movq 8(%rsp), %rax subq %fs:40, %rax jne .L31 call _ZSt16__throw_bad_castv@PLT .L31: call __stack_chk_fail@PLT .L11: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L12 .L25: movq 
8(%rsp), %rax subq %fs:40, %rax jne .L32 call _ZSt16__throw_bad_castv@PLT .L32: call __stack_chk_fail@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .L26: movq 8(%rsp), %rax subq %fs:40, %rax jne .L33 call _ZSt16__throw_bad_castv@PLT .L33: call __stack_chk_fail@PLT .L20: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L21 .L27: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size _Z13file1_sq_funci, .-_Z13file1_sq_funci .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { cudaError_t err; int nDevices = 0; err = cudaGetDeviceCount(&nDevices); if(err != cudaSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
#include <hip/hip_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { hipError_t err; int nDevices = 0; err = hipGetDeviceCount(&nDevices); if(err != hipSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { hipError_t err; int nDevices = 0; err = hipGetDeviceCount(&nDevices); if(err != hipSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> int __host__ file1_sq_func(int x) { hipError_t err; int nDevices = 0; err = hipGetDeviceCount(&nDevices); if(err != hipSuccess) { std::cout << "nDevices: " << nDevices << std::endl; std::cout << "err: " << err << std::endl; return 1; } std::cout << "this library uses cuda code" << std::endl; std::cout << "you have " << nDevices << " devices that support cuda" << std::endl; return x * x; }
.text .file "static.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z13file1_sq_funci # -- Begin function _Z13file1_sq_funci .p2align 4, 0x90 .type _Z13file1_sq_funci,@function _Z13file1_sq_funci: # @_Z13file1_sq_funci .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebx movl $0, 4(%rsp) leaq 4(%rsp), %rdi callq hipGetDeviceCount testl %eax, %eax je .LBB0_10 # %bb.1: movl %eax, %ebp movl $_ZSt4cout, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 4(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB0_20 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB0_4 # %bb.3: movzbl 67(%rbx), %ecx jmp .LBB0_5 .LBB0_10: movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $27, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB0_20 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i12 cmpb $0, 56(%r14) je .LBB0_13 # %bb.12: movzbl 67(%r14), %eax jmp .LBB0_14 .LBB0_4: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB0_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $5, %edx callq 
_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebp, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB0_20 # %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i7 cmpb $0, 56(%rbx) je .LBB0_8 # %bb.7: movzbl 67(%rbx), %ecx jmp .LBB0_9 .LBB0_8: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit10 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %ebx jmp .LBB0_19 .LBB0_13: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB0_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit15 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 4(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.4, %esi movl $26, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r14), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB0_20 # %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i17 cmpb $0, 56(%r15) je .LBB0_17 # %bb.16: movzbl 67(%r15), %eax jmp .LBB0_18 .LBB0_17: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB0_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit20 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv imull %ebx, %ebx .LBB0_19: movl %ebx, %eax addq $8, %rsp .cfi_def_cfa_offset 40 popq 
%rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB0_20: .cfi_def_cfa_offset 48 callq _ZSt16__throw_bad_castv .Lfunc_end0: .size _Z13file1_sq_funci, .Lfunc_end0-_Z13file1_sq_funci .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "nDevices: " .size .L.str, 11 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "err: " .size .L.str.1, 6 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "this library uses cuda code" .size .L.str.2, 28 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "you have " .size .L.str.3, 10 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " devices that support cuda" .size .L.str.4, 27 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0015dd4c_00000000-6_static.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "nDevices: " .LC1: .string "err: " .LC2: .string "this library uses cuda code" .LC3: .string "you have " .LC4: .string " devices that support cuda" .text .globl _Z13file1_sq_funci .type _Z13file1_sq_funci, @function _Z13file1_sq_funci: .LFB3669: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movl %edi, %ebx movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movl $0, 4(%rsp) leaq 4(%rsp), %rdi call cudaGetDeviceCount@PLT testl %eax, %eax jne .L24 movl $27, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbp testq %rbp, %rbp je .L25 cmpb $0, 56(%rbp) je .L16 movzbl 67(%rbp), %esi .L17: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $9, %edx leaq .LC3(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rbp movl $26, %edx leaq .LC4(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq 0(%rbp), %rax movq -24(%rax), %rax movq 
240(%rbp,%rax), %r12 testq %r12, %r12 je .L26 cmpb $0, 56(%r12) je .L20 movzbl 67(%r12), %esi .L21: movsbl %sil, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl %ebx, %eax imull %ebx, %eax .L3: movq 8(%rsp), %rdx subq %fs:40, %rdx jne .L27 addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state movl %eax, %ebp movl $10, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 4(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r12 testq %r12, %r12 je .L28 cmpb $0, 56(%r12) je .L7 movzbl 67(%r12), %esi .L8: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $5, %edx leaq .LC1(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebp, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L29 cmpb $0, 56(%rbp) je .L11 movzbl 67(%rbp), %esi .L12: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $1, %eax jmp .L3 .L28: movq 8(%rsp), %rax subq %fs:40, %rax jne .L30 call _ZSt16__throw_bad_castv@PLT .L30: call __stack_chk_fail@PLT .L7: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L8 .L29: movq 8(%rsp), %rax subq %fs:40, %rax jne .L31 call _ZSt16__throw_bad_castv@PLT .L31: call __stack_chk_fail@PLT .L11: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L12 .L25: movq 
8(%rsp), %rax subq %fs:40, %rax jne .L32 call _ZSt16__throw_bad_castv@PLT .L32: call __stack_chk_fail@PLT .L16: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L17 .L26: movq 8(%rsp), %rax subq %fs:40, %rax jne .L33 call _ZSt16__throw_bad_castv@PLT .L33: call __stack_chk_fail@PLT .L20: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L21 .L27: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size _Z13file1_sq_funci, .-_Z13file1_sq_funci .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "static.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z13file1_sq_funci # -- Begin function _Z13file1_sq_funci .p2align 4, 0x90 .type _Z13file1_sq_funci,@function _Z13file1_sq_funci: # @_Z13file1_sq_funci .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebx movl $0, 4(%rsp) leaq 4(%rsp), %rdi callq hipGetDeviceCount testl %eax, %eax je .LBB0_10 # %bb.1: movl %eax, %ebp movl $_ZSt4cout, %edi movl $.L.str, %esi movl $10, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 4(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB0_20 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB0_4 # %bb.3: movzbl 67(%rbx), %ecx jmp .LBB0_5 .LBB0_10: movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $27, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB0_20 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i12 cmpb $0, 56(%r14) je .LBB0_13 # %bb.12: movzbl 67(%r14), %eax jmp .LBB0_14 .LBB0_4: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB0_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $5, %edx callq 
_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebp, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB0_20 # %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i7 cmpb $0, 56(%rbx) je .LBB0_8 # %bb.7: movzbl 67(%rbx), %ecx jmp .LBB0_9 .LBB0_8: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit10 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $1, %ebx jmp .LBB0_19 .LBB0_13: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB0_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit15 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl 4(%rsp), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq %rax, %r14 movl $.L.str.4, %esi movl $26, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r14), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB0_20 # %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i17 cmpb $0, 56(%r15) je .LBB0_17 # %bb.16: movzbl 67(%r15), %eax jmp .LBB0_18 .LBB0_17: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB0_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit20 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv imull %ebx, %ebx .LBB0_19: movl %ebx, %eax addq $8, %rsp .cfi_def_cfa_offset 40 popq 
%rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB0_20: .cfi_def_cfa_offset 48 callq _ZSt16__throw_bad_castv .Lfunc_end0: .size _Z13file1_sq_funci, .Lfunc_end0-_Z13file1_sq_funci .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "nDevices: " .size .L.str, 11 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "err: " .size .L.str.1, 6 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "this library uses cuda code" .size .L.str.2, 28 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "you have " .size .L.str.3, 10 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " devices that support cuda" .size .L.str.4, 27 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _ZSt4cout .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <math.h> __device__ float fact_fun(int idx){ float fact = 1; for(int i = 1; i<idx+1; i++){ fact = fact*i; } fact = 1/fact; return fact; } __global__ void e_sum(float *c){ int duljina = 500; const int idx = threadIdx.x; c[idx] = fact_fun(idx); c[duljina-idx-1] = fact_fun(duljina-idx); }
.file "tmpxft_00189fe9_00000000-6_zrno.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8fact_funi .type _Z8fact_funi, @function _Z8fact_funi: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z8fact_funi, .-_Z8fact_funi .globl _Z24__device_stub__Z5e_sumPfPf .type _Z24__device_stub__Z5e_sumPfPf, @function _Z24__device_stub__Z5e_sumPfPf: .LFB2052: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 88(%rsp), %rax subq %fs:40, %rax jne .L10 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5e_sumPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z24__device_stub__Z5e_sumPfPf, .-_Z24__device_stub__Z5e_sumPfPf .globl _Z5e_sumPf .type _Z5e_sumPf, @function _Z5e_sumPf: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z24__device_stub__Z5e_sumPfPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z5e_sumPf, .-_Z5e_sumPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5e_sumPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z5e_sumPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <math.h> __device__ float fact_fun(int idx){ float fact = 1; for(int i = 1; i<idx+1; i++){ fact = fact*i; } fact = 1/fact; return fact; } __global__ void e_sum(float *c){ int duljina = 500; const int idx = threadIdx.x; c[idx] = fact_fun(idx); c[duljina-idx-1] = fact_fun(duljina-idx); }
#include <hip/hip_runtime.h> #include <math.h> __device__ float fact_fun(int idx){ float fact = 1; for(int i = 1; i<idx+1; i++){ fact = fact*i; } fact = 1/fact; return fact; } __global__ void e_sum(float *c){ int duljina = 500; const int idx = threadIdx.x; c[idx] = fact_fun(idx); c[duljina-idx-1] = fact_fun(duljina-idx); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <math.h> __device__ float fact_fun(int idx){ float fact = 1; for(int i = 1; i<idx+1; i++){ fact = fact*i; } fact = 1/fact; return fact; } __global__ void e_sum(float *c){ int duljina = 500; const int idx = threadIdx.x; c[idx] = fact_fun(idx); c[duljina-idx-1] = fact_fun(duljina-idx); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5e_sumPf .globl _Z5e_sumPf .p2align 8 .type _Z5e_sumPf,@function _Z5e_sumPf: v_mov_b32_e32 v1, 1.0 s_mov_b32 s3, 0 s_mov_b32 s2, exec_lo v_cmpx_ne_u32_e32 0, v0 s_cbranch_execz .LBB0_4 v_mov_b32_e32 v1, 1.0 s_mov_b32 s4, 0 .LBB0_2: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s4, s4, 1 v_cvt_f32_i32_e32 v2, s4 v_cmp_eq_u32_e32 vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_mul_f32_e32 v1, v1, v2 s_or_b32 s3, vcc_lo, s3 s_and_not1_b32 exec_lo, exec_lo, s3 s_cbranch_execnz .LBB0_2 s_or_b32 exec_lo, exec_lo, s3 .LBB0_4: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s2 v_div_scale_f32 v2, null, v1, v1, 1.0 v_div_scale_f32 v5, vcc_lo, 1.0, v1, 1.0 s_load_b64 s[0:1], s[0:1], 0x0 v_rcp_f32_e32 v3, v2 s_mov_b32 s2, exec_lo s_waitcnt_depctr 0xfff v_fma_f32 v4, -v2, v3, 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v3, v4, v3 v_mul_f32_e32 v4, v5, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v6, -v2, v4, v5 v_fmac_f32_e32 v4, v6, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v2, -v2, v4, v5 v_div_fmas_f32 v2, v2, v3, v4 v_lshlrev_b32_e32 v3, 2, v0 s_delay_alu instid0(VALU_DEP_2) v_div_fixup_f32 v2, v2, v1, 1.0 v_mov_b32_e32 v1, 1.0 s_waitcnt lgkmcnt(0) global_store_b32 v3, v2, s[0:1] v_cmpx_gt_u32_e32 0x1f4, v0 s_cbranch_execz .LBB0_8 v_sub_nc_u32_e32 v2, 0, v0 v_mov_b32_e32 v1, 1.0 s_mov_b32 s3, 0 s_movk_i32 s4, 0xfe0c .LBB0_6: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) s_add_i32 s5, s4, 0x1f5 s_add_i32 s4, s4, 1 v_cvt_f32_i32_e32 v3, s5 v_cmp_eq_u32_e32 vcc_lo, s4, v2 v_mul_f32_e32 v1, v1, v3 s_or_b32 s3, vcc_lo, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s3 s_cbranch_execnz .LBB0_6 s_or_b32 
exec_lo, exec_lo, s3 .LBB0_8: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s2 v_div_scale_f32 v3, null, v1, v1, 1.0 v_div_scale_f32 v5, vcc_lo, 1.0, v1, 1.0 v_rcp_f32_e32 v4, v3 s_waitcnt_depctr 0xfff v_fma_f32 v2, -v3, v4, 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v4, v2, v4 v_mul_f32_e32 v6, v5, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v2, -v3, v6, v5 v_fmac_f32_e32 v6, v2, v4 v_sub_nc_u32_e32 v2, 0x1f3, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v0, -v3, v6, v5 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_fmas_f32 v0, v0, v4, v6 v_lshlrev_b64 v[2:3], 2, v[2:3] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_fixup_f32 v4, v0, v1, 1.0 v_add_co_u32 v0, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo global_store_b32 v[0:1], v4, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5e_sumPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 8 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 6 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 
.amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5e_sumPf, .Lfunc_end0-_Z5e_sumPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 8 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5e_sumPf .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z5e_sumPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <math.h> __device__ float fact_fun(int idx){ float fact = 1; for(int i = 1; i<idx+1; i++){ fact = fact*i; } fact = 1/fact; return fact; } __global__ void e_sum(float *c){ int duljina = 500; const int idx = threadIdx.x; c[idx] = fact_fun(idx); c[duljina-idx-1] = fact_fun(duljina-idx); }
.text .file "zrno.hip" .globl _Z20__device_stub__e_sumPf # -- Begin function _Z20__device_stub__e_sumPf .p2align 4, 0x90 .type _Z20__device_stub__e_sumPf,@function _Z20__device_stub__e_sumPf: # @_Z20__device_stub__e_sumPf .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z5e_sumPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z20__device_stub__e_sumPf, .Lfunc_end0-_Z20__device_stub__e_sumPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5e_sumPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq 
.Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z5e_sumPf,@object # @_Z5e_sumPf .section .rodata,"a",@progbits .globl _Z5e_sumPf .p2align 3, 0x0 _Z5e_sumPf: .quad _Z20__device_stub__e_sumPf .size _Z5e_sumPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5e_sumPf" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__e_sumPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5e_sumPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00189fe9_00000000-6_zrno.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8fact_funi .type _Z8fact_funi, @function _Z8fact_funi: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z8fact_funi, .-_Z8fact_funi .globl _Z24__device_stub__Z5e_sumPfPf .type _Z24__device_stub__Z5e_sumPfPf, @function _Z24__device_stub__Z5e_sumPfPf: .LFB2052: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 88(%rsp), %rax subq %fs:40, %rax jne .L10 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5e_sumPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z24__device_stub__Z5e_sumPfPf, .-_Z24__device_stub__Z5e_sumPfPf .globl _Z5e_sumPf .type _Z5e_sumPf, @function _Z5e_sumPf: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z24__device_stub__Z5e_sumPfPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z5e_sumPf, .-_Z5e_sumPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5e_sumPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z5e_sumPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "zrno.hip" .globl _Z20__device_stub__e_sumPf # -- Begin function _Z20__device_stub__e_sumPf .p2align 4, 0x90 .type _Z20__device_stub__e_sumPf,@function _Z20__device_stub__e_sumPf: # @_Z20__device_stub__e_sumPf .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z5e_sumPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z20__device_stub__e_sumPf, .Lfunc_end0-_Z20__device_stub__e_sumPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5e_sumPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq 
.Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z5e_sumPf,@object # @_Z5e_sumPf .section .rodata,"a",@progbits .globl _Z5e_sumPf .p2align 3, 0x0 _Z5e_sumPf: .quad _Z20__device_stub__e_sumPf .size _Z5e_sumPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5e_sumPf" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__e_sumPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5e_sumPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> const int SWEEPS = 100; void printArray(int* arr, int size) { for (int i = 0; i < size; i++) { printf("%d ", arr[i]); } printf("\n"); } int getSize(FILE* fptr) { int size = 0; int temp; while (fscanf(fptr, "%d", &temp) == 1) { size++; } return size; } void getArray(FILE* fptr, int* arr) { int i = 0; while (fscanf(fptr, "%d", &arr[i]) == 1) { i++; } } // CUDA kernel for performing Ising Annealing in parallel __global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){ *count = N; int vertex = blockDim.x*blockIdx.x + threadIdx.x; if(vertex < vertices){ int sigmaI = d_state[vertex]; int H = 0; for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) { int sigmaJ = d_state[d_flat[i]]; int J = d_flat[i + 1]; H -= (J * sigmaI * sigmaJ); } __syncthreads(); curandState_t random; curand_init(0, 0, 0, &random); int s = H / sigmaI; if (s > 0) { d_state[vertex] = -1; } else if (s < 0) { d_state[vertex] = 1; } else { d_state[vertex] = 1 - 2 * (curand(&random) % 2); } __syncthreads(); curand_init(0, 0, 0, &random); if(*count > 0){ int index = curand(&random) % vertices; if (d_state[index] == 1) { d_state[index] = -1; } else { d_state[index] = 1; } *count--; } } } // utility function to compute Hamiltonian given a vertex // ***not used in the parallel implementation*** int computeHamiltonian(int* flat, int* places, int* state, int vertex) { int sigmaI = state[vertex]; int H = 0; for (int i = places[vertex]; i < places[vertex + 1]; i += 2) { int sigmaJ = state[flat[i]]; int J = flat[i + 1]; H -= (J * sigmaI * sigmaJ); } return H; } int main(){ FILE* fptr; // load array $flat fptr = fopen("flat.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int fsize = getSize(fptr); printf("Number of integers present in flat.txt: %d\n", 
fsize); // read values in text file to array $flat int* flat = (int*)malloc(sizeof(int) * fsize); fseek(fptr, 0, SEEK_SET); getArray(fptr, flat); //printArray(flat, fsize); fclose(fptr); /**********/ // load array $places fptr = fopen("places.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int psize = getSize(fptr); printf("Number of integers present in places.txt: %d\n", psize); // read values in text file to array $places int* places = (int*)malloc(sizeof(int) * psize); fseek(fptr, 0, SEEK_SET); getArray(fptr, places); //printArray(places, psize); fclose(fptr); /**********/ int vertices = psize - 1; int* state = (int*)malloc(sizeof(int) * vertices); printf("Number of vertices: %d\n", vertices); // initialize states randomly for (int i = 0; i < vertices; ++i) { state[i] = 1 - 2 * (rand() % 2); } double initial_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); initial_energy += H; } printf("Initial energy: %f\n", initial_energy / 2); // allocate memory on device int* d_flat; int* d_places; int* d_state; int* count; cudaMalloc((void**)&count, sizeof(int)); cudaMalloc((void**)&d_flat, fsize * sizeof(int)); cudaMalloc((void**)&d_places, psize*sizeof(int)); cudaMalloc((void**)&d_state, vertices*sizeof(int)); cudaMemcpy(d_flat, flat, fsize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_places, places, psize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_state, state, vertices*sizeof(int), cudaMemcpyHostToDevice); int threads = 1024; int blocks = (vertices / 1024) + 1; int N = vertices*0.75; clock_t begin = clock(); for (int i = 0; i < SWEEPS; i++) { isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N); //cudaDeviceSynchronize(); N *= 0.9; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; cudaMemcpy(flat, d_flat, fsize*sizeof(int), cudaMemcpyDeviceToHost); 
cudaMemcpy(places, d_places, psize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(state, d_state, vertices*sizeof(int), cudaMemcpyDeviceToHost); double final_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); final_energy += H; } printf("Final energy: %f\n", final_energy / 2); printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent); // store results in txt files // $time_spent $initial_energy $final_energy printf("Storing parallel code's results in results.txt...\n"); fptr = fopen("results.txt", "a"); fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy); fclose(fptr); printf("Finished writing to results.txt\n"); // free dynamic memory on host and device free(flat); free(places); free(state); cudaFree(d_flat); cudaFree(d_places); cudaFree(d_state); return 0; }
.file "tmpxft_0018d839_00000000-6_ising_annealing_parallel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2278: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2278: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d " .LC1: .string "\n" .text .globl _Z10printArrayPii .type _Z10printArrayPii, @function _Z10printArrayPii: .LFB2271: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 testl %esi, %esi jle .L4 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r12 leaq .LC0(%rip), %rbp .L5: movl (%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L5 .L4: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2271: .size _Z10printArrayPii, .-_Z10printArrayPii .section .rodata.str1.1 .LC2: .string "%d" .text .globl _Z7getSizeP8_IO_FILE .type _Z7getSizeP8_IO_FILE, @function _Z7getSizeP8_IO_FILE: .LFB2272: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbp movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movl $0, %ebx leaq .LC2(%rip), %r12 jmp .L9 .L10: addl $1, %ebx .L9: leaq 4(%rsp), %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L10 movq 8(%rsp), %rax subq %fs:40, %rax jne .L13 movl %ebx, %eax addq $16, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2272: .size _Z7getSizeP8_IO_FILE, .-_Z7getSizeP8_IO_FILE .globl _Z8getArrayP8_IO_FILEPi .type _Z8getArrayP8_IO_FILEPi, @function _Z8getArrayP8_IO_FILEPi: .LFB2273: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbp movq %rsi, %rbx leaq .LC2(%rip), %r12 .L15: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT addq $4, %rbx cmpl $1, %eax je .L15 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2273: .size _Z8getArrayP8_IO_FILEPi, .-_Z8getArrayP8_IO_FILEPi .globl _Z18computeHamiltonianPiS_S_i .type _Z18computeHamiltonianPiS_S_i, @function _Z18computeHamiltonianPiS_S_i: .LFB2274: .cfi_startproc endbr64 movq %rdi, %r9 movq %rdx, %rdi movslq %ecx, %rcx movl (%rdx,%rcx,4), %r8d movl (%rsi,%rcx,4), %r10d movl 4(%rsi,%rcx,4), %edx cmpl %edx, %r10d jge .L21 movslq %r10d, %rcx leaq (%r9,%rcx,4), %rax subl %r10d, %edx subl $1, %edx shrl %edx leal (%rdx,%rdx), %edx addq %rcx, %rdx leaq 8(%r9,%rdx,4), %r9 movl $0, %ecx .L20: movslq (%rax), %rsi movl %r8d, %edx imull 4(%rax), %edx imull (%rdi,%rsi,4), %edx subl %edx, %ecx addq $8, %rax cmpq %r9, %rax jne .L20 .L18: movl %ecx, %eax ret .L21: movl $0, %ecx jmp .L18 .cfi_endproc .LFE2274: .size _Z18computeHamiltonianPiS_S_i, .-_Z18computeHamiltonianPiS_S_i .globl _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii .type _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii, @function _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii: .LFB2300: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) 
movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L27 .L23: movq 168(%rsp), %rax subq %fs:40, %rax jne .L28 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z18isingAnnealingStepPiS_S_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L23 .L28: call __stack_chk_fail@PLT .cfi_endproc .LFE2300: .size _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii, .-_Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii .globl _Z18isingAnnealingStepPiS_S_S_ii .type _Z18isingAnnealingStepPiS_S_S_ii, @function _Z18isingAnnealingStepPiS_S_S_ii: .LFB2301: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2301: .size _Z18isingAnnealingStepPiS_S_S_ii, .-_Z18isingAnnealingStepPiS_S_S_ii .section .rodata.str1.1 .LC4: .string "r" .LC5: .string "flat.txt" .LC6: .string "Error!" 
.section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "Number of integers present in flat.txt: %d\n" .section .rodata.str1.1 .LC8: .string "places.txt" .section .rodata.str1.8 .align 8 .LC9: .string "Number of integers present in places.txt: %d\n" .section .rodata.str1.1 .LC10: .string "Number of vertices: %d\n" .LC12: .string "Initial energy: %f\n" .LC16: .string "Final energy: %f\n" .section .rodata.str1.8 .align 8 .LC17: .string "Time taken for parallel Ising Annealing: %f seconds\n" .align 8 .LC18: .string "Storing parallel code's results in results.txt...\n" .section .rodata.str1.1 .LC19: .string "a" .LC20: .string "results.txt" .LC21: .string "Parallel\t%d\t%f\t%f\t%f\n" .section .rodata.str1.8 .align 8 .LC22: .string "Finished writing to results.txt\n" .text .globl main .type main, @function main: .LFB2275: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq .LC4(%rip), %rsi leaq .LC5(%rip), %rdi call fopen@PLT testq %rax, %rax je .L50 movq %rax, %rbx movq %rax, %rdi call _Z7getSizeP8_IO_FILE movl %eax, %ebp movl %eax, %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebp, %rbp leaq 0(,%rbp,4), %rax movq %rax, 8(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r15 movl $0, %edx movl $0, %esi movq %rbx, %rdi call fseek@PLT movq %r15, %rsi movq %rbx, %rdi call _Z8getArrayP8_IO_FILEPi movq %rbx, %rdi call fclose@PLT leaq .LC4(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %rbx testq %rax, %rax je .L51 movq %rax, %rdi call _Z7getSizeP8_IO_FILE movl %eax, %ebp movl %eax, 52(%rsp) movl %eax, %edx leaq .LC9(%rip), %rsi 
movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebp, %rax leaq 0(,%rax,4), %r14 movq %r14, 32(%rsp) movq %r14, %rdi call malloc@PLT movq %rax, %r13 movq %rax, 24(%rsp) movl $0, %edx movl $0, %esi movq %rbx, %rdi call fseek@PLT movq %r13, %rsi movq %rbx, %rdi call _Z8getArrayP8_IO_FILEPi movq %rbx, %rdi call fclose@PLT leal -1(%rbp), %r13d leaq -4(%r14), %rax movq %rax, 40(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r14 movl %r13d, %edx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testl %r13d, %r13d jle .L43 movq %r14, %rbx leal -2(%rbp), %eax leaq 4(%r14,%rax,4), %r12 movl $1, %ebp .L36: call rand@PLT movl %eax, %edx shrl $31, %edx addl %edx, %eax andl $1, %eax subl %edx, %eax addl %eax, %eax movl %ebp, %edx subl %eax, %edx movl %edx, (%rbx) addq $4, %rbx cmpq %r12, %rbx jne .L36 movl 52(%rsp), %eax leal -1(%rax), %ebp movl $0, %ebx movq $0x000000000, 16(%rsp) movq 24(%rsp), %r12 .L37: movl %ebx, %ecx movq %r14, %rdx movq %r12, %rsi movq %r15, %rdi call _Z18computeHamiltonianPiS_S_i pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd 16(%rsp), %xmm0 movsd %xmm0, 16(%rsp) addl $1, %ebx cmpl %ebx, %ebp jne .L37 .L35: movsd 16(%rsp), %xmm0 mulsd .LC11(%rip), %xmm0 leaq .LC12(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq 88(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 64(%rsp), %rdi movq 8(%rsp), %r12 movq %r12, %rsi call cudaMalloc@PLT leaq 72(%rsp), %rdi movq 32(%rsp), %rbp movq %rbp, %rsi call cudaMalloc@PLT leaq 80(%rsp), %rdi movq 40(%rsp), %rbx movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r12, %rdx movq %r15, %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq 24(%rsp), %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT leal 1023(%r13), %r12d testl %r13d, %r13d cmovns %r13d, %r12d sarl $10, %r12d addl $1, %r12d pxor %xmm0, %xmm0 cvtsi2sdl %r13d, %xmm0 mulsd .LC13(%rip), %xmm0 
cvttsd2sil %xmm0, %ebp call clock@PLT movq %rax, 56(%rsp) movl $100, %ebx jmp .L39 .L50: leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L31 .L51: leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L31 .L43: movq $0x000000000, 16(%rsp) jmp .L35 .L38: pxor %xmm0, %xmm0 cvtsi2sdl %ebp, %xmm0 mulsd .LC14(%rip), %xmm0 cvttsd2sil %xmm0, %ebp subl $1, %ebx je .L52 .L39: movl $1024, 108(%rsp) movl $1, 112(%rsp) movl %r12d, 96(%rsp) movl $1, 100(%rsp) movl $0, %r9d movl $0, %r8d movq 108(%rsp), %rdx movl $1, %ecx movq 96(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L38 movl %ebp, %r9d movl %r13d, %r8d movq 88(%rsp), %rcx movq 80(%rsp), %rdx movq 72(%rsp), %rsi movq 64(%rsp), %rdi call _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii jmp .L38 .L52: call clock@PLT movq 56(%rsp), %rcx subq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC15(%rip), %xmm0 movq %xmm0, %r12 movl $2, %ecx movq 8(%rsp), %rdx movq 64(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movl $2, %ecx movq 32(%rsp), %rdx movq 72(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movq 40(%rsp), %rdx movq 80(%rsp), %rsi movq %r14, %rdi call cudaMemcpy@PLT testl %r13d, %r13d jle .L44 movl 52(%rsp), %ebp subl $1, %ebp movl $0, %ebx movq $0x000000000, 8(%rsp) movq %r12, 32(%rsp) movq 24(%rsp), %r12 .L41: movl %ebx, %ecx movq %r14, %rdx movq %r12, %rsi movq %r15, %rdi call _Z18computeHamiltonianPiS_S_i pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd 8(%rsp), %xmm0 movsd %xmm0, 8(%rsp) addl $1, %ebx cmpl %ebx, %ebp jne .L41 movq 32(%rsp), %r12 .L40: movsd 8(%rsp), %xmm0 mulsd .LC11(%rip), %xmm0 leaq .LC16(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq %r12, %xmm0 leaq .LC17(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC19(%rip), %rsi leaq 
.LC20(%rip), %rdi call fopen@PLT movq %rax, %rbx movsd 8(%rsp), %xmm2 movsd 16(%rsp), %xmm1 movq %r12, %xmm0 movl %r13d, %ecx leaq .LC21(%rip), %rdx movl $2, %esi movq %rax, %rdi movl $3, %eax call __fprintf_chk@PLT movq %rbx, %rdi call fclose@PLT leaq .LC22(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r15, %rdi call free@PLT movq 24(%rsp), %rdi call free@PLT movq %r14, %rdi call free@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq 80(%rsp), %rdi call cudaFree@PLT movl $0, %eax .L31: movq 120(%rsp), %rdx subq %fs:40, %rdx jne .L53 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movq $0x000000000, 8(%rsp) jmp .L40 .L53: call __stack_chk_fail@PLT .cfi_endproc .LFE2275: .size main, .-main .section .rodata.str1.8 .align 8 .LC23: .string "_Z18isingAnnealingStepPiS_S_S_ii" .section .rodata.str1.1 .LC24: .string "precalc_xorwow_matrix" .LC25: .string "precalc_xorwow_offset_matrix" .LC26: .string "mrg32k3aM1" .LC27: .string "mrg32k3aM2" .LC28: .string "mrg32k3aM1SubSeq" .LC29: .string "mrg32k3aM2SubSeq" .LC30: .string "mrg32k3aM1Seq" .LC31: .string "mrg32k3aM2Seq" .LC32: .string "__cr_lgamma_table" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2303: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC23(%rip), %rdx movq %rdx, %rcx leaq _Z18isingAnnealingStepPiS_S_S_ii(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC24(%rip), %rdx movq %rdx, %rcx leaq _ZL21precalc_xorwow_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC25(%rip), %rdx movq %rdx, %rcx leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC26(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC27(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM2(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC28(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC29(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC30(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM1Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC31(%rip), %rdx movq %rdx, %rcx leaq 
_ZL13mrg32k3aM2Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC32(%rip), %rdx movq %rdx, %rcx leaq _ZL17__cr_lgamma_table(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2303: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL17__cr_lgamma_table .comm _ZL17__cr_lgamma_table,72,32 .local _ZL13mrg32k3aM2Seq .comm _ZL13mrg32k3aM2Seq,2304,32 .local _ZL13mrg32k3aM1Seq .comm _ZL13mrg32k3aM1Seq,2304,32 .local _ZL16mrg32k3aM2SubSeq .comm _ZL16mrg32k3aM2SubSeq,2016,32 .local _ZL16mrg32k3aM1SubSeq .comm _ZL16mrg32k3aM1SubSeq,2016,32 .local _ZL10mrg32k3aM2 .comm _ZL10mrg32k3aM2,2304,32 .local _ZL10mrg32k3aM1 .comm _ZL10mrg32k3aM1,2304,32 .local _ZL28precalc_xorwow_offset_matrix .comm _ZL28precalc_xorwow_offset_matrix,102400,32 .local _ZL21precalc_xorwow_matrix .comm _ZL21precalc_xorwow_matrix,102400,32 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC11: .long 0 .long 1071644672 .align 8 .LC13: .long 0 .long 1072168960 .align 8 .LC14: .long -858993459 .long 1072483532 .align 8 .LC15: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 
3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> const int SWEEPS = 100; void printArray(int* arr, int size) { for (int i = 0; i < size; i++) { printf("%d ", arr[i]); } printf("\n"); } int getSize(FILE* fptr) { int size = 0; int temp; while (fscanf(fptr, "%d", &temp) == 1) { size++; } return size; } void getArray(FILE* fptr, int* arr) { int i = 0; while (fscanf(fptr, "%d", &arr[i]) == 1) { i++; } } // CUDA kernel for performing Ising Annealing in parallel __global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){ *count = N; int vertex = blockDim.x*blockIdx.x + threadIdx.x; if(vertex < vertices){ int sigmaI = d_state[vertex]; int H = 0; for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) { int sigmaJ = d_state[d_flat[i]]; int J = d_flat[i + 1]; H -= (J * sigmaI * sigmaJ); } __syncthreads(); curandState_t random; curand_init(0, 0, 0, &random); int s = H / sigmaI; if (s > 0) { d_state[vertex] = -1; } else if (s < 0) { d_state[vertex] = 1; } else { d_state[vertex] = 1 - 2 * (curand(&random) % 2); } __syncthreads(); curand_init(0, 0, 0, &random); if(*count > 0){ int index = curand(&random) % vertices; if (d_state[index] == 1) { d_state[index] = -1; } else { d_state[index] = 1; } *count--; } } } // utility function to compute Hamiltonian given a vertex // ***not used in the parallel implementation*** int computeHamiltonian(int* flat, int* places, int* state, int vertex) { int sigmaI = state[vertex]; int H = 0; for (int i = places[vertex]; i < places[vertex + 1]; i += 2) { int sigmaJ = state[flat[i]]; int J = flat[i + 1]; H -= (J * sigmaI * sigmaJ); } return H; } int main(){ FILE* fptr; // load array $flat fptr = fopen("flat.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int fsize = getSize(fptr); printf("Number of integers present in flat.txt: %d\n", 
fsize); // read values in text file to array $flat int* flat = (int*)malloc(sizeof(int) * fsize); fseek(fptr, 0, SEEK_SET); getArray(fptr, flat); //printArray(flat, fsize); fclose(fptr); /**********/ // load array $places fptr = fopen("places.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int psize = getSize(fptr); printf("Number of integers present in places.txt: %d\n", psize); // read values in text file to array $places int* places = (int*)malloc(sizeof(int) * psize); fseek(fptr, 0, SEEK_SET); getArray(fptr, places); //printArray(places, psize); fclose(fptr); /**********/ int vertices = psize - 1; int* state = (int*)malloc(sizeof(int) * vertices); printf("Number of vertices: %d\n", vertices); // initialize states randomly for (int i = 0; i < vertices; ++i) { state[i] = 1 - 2 * (rand() % 2); } double initial_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); initial_energy += H; } printf("Initial energy: %f\n", initial_energy / 2); // allocate memory on device int* d_flat; int* d_places; int* d_state; int* count; cudaMalloc((void**)&count, sizeof(int)); cudaMalloc((void**)&d_flat, fsize * sizeof(int)); cudaMalloc((void**)&d_places, psize*sizeof(int)); cudaMalloc((void**)&d_state, vertices*sizeof(int)); cudaMemcpy(d_flat, flat, fsize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_places, places, psize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_state, state, vertices*sizeof(int), cudaMemcpyHostToDevice); int threads = 1024; int blocks = (vertices / 1024) + 1; int N = vertices*0.75; clock_t begin = clock(); for (int i = 0; i < SWEEPS; i++) { isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N); //cudaDeviceSynchronize(); N *= 0.9; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; cudaMemcpy(flat, d_flat, fsize*sizeof(int), cudaMemcpyDeviceToHost); 
cudaMemcpy(places, d_places, psize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(state, d_state, vertices*sizeof(int), cudaMemcpyDeviceToHost); double final_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); final_energy += H; } printf("Final energy: %f\n", final_energy / 2); printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent); // store results in txt files // $time_spent $initial_energy $final_energy printf("Storing parallel code's results in results.txt...\n"); fptr = fopen("results.txt", "a"); fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy); fclose(fptr); printf("Finished writing to results.txt\n"); // free dynamic memory on host and device free(flat); free(places); free(state); cudaFree(d_flat); cudaFree(d_places); cudaFree(d_state); return 0; }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> const int SWEEPS = 100; void printArray(int* arr, int size) { for (int i = 0; i < size; i++) { printf("%d ", arr[i]); } printf("\n"); } int getSize(FILE* fptr) { int size = 0; int temp; while (fscanf(fptr, "%d", &temp) == 1) { size++; } return size; } void getArray(FILE* fptr, int* arr) { int i = 0; while (fscanf(fptr, "%d", &arr[i]) == 1) { i++; } } // CUDA kernel for performing Ising Annealing in parallel __global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){ *count = N; int vertex = blockDim.x*blockIdx.x + threadIdx.x; if(vertex < vertices){ int sigmaI = d_state[vertex]; int H = 0; for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) { int sigmaJ = d_state[d_flat[i]]; int J = d_flat[i + 1]; H -= (J * sigmaI * sigmaJ); } __syncthreads(); hiprandState_t random; hiprand_init(0, 0, 0, &random); int s = H / sigmaI; if (s > 0) { d_state[vertex] = -1; } else if (s < 0) { d_state[vertex] = 1; } else { d_state[vertex] = 1 - 2 * (hiprand(&random) % 2); } __syncthreads(); hiprand_init(0, 0, 0, &random); if(*count > 0){ int index = hiprand(&random) % vertices; if (d_state[index] == 1) { d_state[index] = -1; } else { d_state[index] = 1; } *count--; } } } // utility function to compute Hamiltonian given a vertex // ***not used in the parallel implementation*** int computeHamiltonian(int* flat, int* places, int* state, int vertex) { int sigmaI = state[vertex]; int H = 0; for (int i = places[vertex]; i < places[vertex + 1]; i += 2) { int sigmaJ = state[flat[i]]; int J = flat[i + 1]; H -= (J * sigmaI * sigmaJ); } return H; } int main(){ FILE* fptr; // load array $flat fptr = fopen("flat.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int fsize = getSize(fptr); printf("Number of integers 
present in flat.txt: %d\n", fsize); // read values in text file to array $flat int* flat = (int*)malloc(sizeof(int) * fsize); fseek(fptr, 0, SEEK_SET); getArray(fptr, flat); //printArray(flat, fsize); fclose(fptr); /**********/ // load array $places fptr = fopen("places.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int psize = getSize(fptr); printf("Number of integers present in places.txt: %d\n", psize); // read values in text file to array $places int* places = (int*)malloc(sizeof(int) * psize); fseek(fptr, 0, SEEK_SET); getArray(fptr, places); //printArray(places, psize); fclose(fptr); /**********/ int vertices = psize - 1; int* state = (int*)malloc(sizeof(int) * vertices); printf("Number of vertices: %d\n", vertices); // initialize states randomly for (int i = 0; i < vertices; ++i) { state[i] = 1 - 2 * (rand() % 2); } double initial_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); initial_energy += H; } printf("Initial energy: %f\n", initial_energy / 2); // allocate memory on device int* d_flat; int* d_places; int* d_state; int* count; hipMalloc((void**)&count, sizeof(int)); hipMalloc((void**)&d_flat, fsize * sizeof(int)); hipMalloc((void**)&d_places, psize*sizeof(int)); hipMalloc((void**)&d_state, vertices*sizeof(int)); hipMemcpy(d_flat, flat, fsize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_places, places, psize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_state, state, vertices*sizeof(int), hipMemcpyHostToDevice); int threads = 1024; int blocks = (vertices / 1024) + 1; int N = vertices*0.75; clock_t begin = clock(); for (int i = 0; i < SWEEPS; i++) { isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N); //cudaDeviceSynchronize(); N *= 0.9; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; hipMemcpy(flat, d_flat, fsize*sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(places, d_places, psize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(state, d_state, vertices*sizeof(int), hipMemcpyDeviceToHost); double final_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); final_energy += H; } printf("Final energy: %f\n", final_energy / 2); printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent); // store results in txt files // $time_spent $initial_energy $final_energy printf("Storing parallel code's results in results.txt...\n"); fptr = fopen("results.txt", "a"); fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy); fclose(fptr); printf("Finished writing to results.txt\n"); // free dynamic memory on host and device free(flat); free(places); free(state); hipFree(d_flat); hipFree(d_places); hipFree(d_state); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> const int SWEEPS = 100; void printArray(int* arr, int size) { for (int i = 0; i < size; i++) { printf("%d ", arr[i]); } printf("\n"); } int getSize(FILE* fptr) { int size = 0; int temp; while (fscanf(fptr, "%d", &temp) == 1) { size++; } return size; } void getArray(FILE* fptr, int* arr) { int i = 0; while (fscanf(fptr, "%d", &arr[i]) == 1) { i++; } } // CUDA kernel for performing Ising Annealing in parallel __global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){ *count = N; int vertex = blockDim.x*blockIdx.x + threadIdx.x; if(vertex < vertices){ int sigmaI = d_state[vertex]; int H = 0; for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) { int sigmaJ = d_state[d_flat[i]]; int J = d_flat[i + 1]; H -= (J * sigmaI * sigmaJ); } __syncthreads(); hiprandState_t random; hiprand_init(0, 0, 0, &random); int s = H / sigmaI; if (s > 0) { d_state[vertex] = -1; } else if (s < 0) { d_state[vertex] = 1; } else { d_state[vertex] = 1 - 2 * (hiprand(&random) % 2); } __syncthreads(); hiprand_init(0, 0, 0, &random); if(*count > 0){ int index = hiprand(&random) % vertices; if (d_state[index] == 1) { d_state[index] = -1; } else { d_state[index] = 1; } *count--; } } } // utility function to compute Hamiltonian given a vertex // ***not used in the parallel implementation*** int computeHamiltonian(int* flat, int* places, int* state, int vertex) { int sigmaI = state[vertex]; int H = 0; for (int i = places[vertex]; i < places[vertex + 1]; i += 2) { int sigmaJ = state[flat[i]]; int J = flat[i + 1]; H -= (J * sigmaI * sigmaJ); } return H; } int main(){ FILE* fptr; // load array $flat fptr = fopen("flat.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int fsize = getSize(fptr); printf("Number of integers 
present in flat.txt: %d\n", fsize); // read values in text file to array $flat int* flat = (int*)malloc(sizeof(int) * fsize); fseek(fptr, 0, SEEK_SET); getArray(fptr, flat); //printArray(flat, fsize); fclose(fptr); /**********/ // load array $places fptr = fopen("places.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int psize = getSize(fptr); printf("Number of integers present in places.txt: %d\n", psize); // read values in text file to array $places int* places = (int*)malloc(sizeof(int) * psize); fseek(fptr, 0, SEEK_SET); getArray(fptr, places); //printArray(places, psize); fclose(fptr); /**********/ int vertices = psize - 1; int* state = (int*)malloc(sizeof(int) * vertices); printf("Number of vertices: %d\n", vertices); // initialize states randomly for (int i = 0; i < vertices; ++i) { state[i] = 1 - 2 * (rand() % 2); } double initial_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); initial_energy += H; } printf("Initial energy: %f\n", initial_energy / 2); // allocate memory on device int* d_flat; int* d_places; int* d_state; int* count; hipMalloc((void**)&count, sizeof(int)); hipMalloc((void**)&d_flat, fsize * sizeof(int)); hipMalloc((void**)&d_places, psize*sizeof(int)); hipMalloc((void**)&d_state, vertices*sizeof(int)); hipMemcpy(d_flat, flat, fsize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_places, places, psize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_state, state, vertices*sizeof(int), hipMemcpyHostToDevice); int threads = 1024; int blocks = (vertices / 1024) + 1; int N = vertices*0.75; clock_t begin = clock(); for (int i = 0; i < SWEEPS; i++) { isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N); //cudaDeviceSynchronize(); N *= 0.9; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; hipMemcpy(flat, d_flat, fsize*sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(places, d_places, psize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(state, d_state, vertices*sizeof(int), hipMemcpyDeviceToHost); double final_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); final_energy += H; } printf("Final energy: %f\n", final_energy / 2); printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent); // store results in txt files // $time_spent $initial_energy $final_energy printf("Storing parallel code's results in results.txt...\n"); fptr = fopen("results.txt", "a"); fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy); fclose(fptr); printf("Finished writing to results.txt\n"); // free dynamic memory on host and device free(flat); free(places); free(state); hipFree(d_flat); hipFree(d_places); hipFree(d_state); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z18isingAnnealingStepPiS_S_S_ii .globl _Z18isingAnnealingStepPiS_S_S_ii .p2align 8 .type _Z18isingAnnealingStepPiS_S_S_ii,@function _Z18isingAnnealingStepPiS_S_S_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b128 s[4:7], s[0:1], 0x18 v_mov_b32_e32 v6, 0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_mov_b32_e32 v0, s7 s_mov_b32 s2, exec_lo global_store_b32 v6, v0, s[4:5] v_cmpx_gt_i32_e64 s6, v1 s_cbranch_execz .LBB0_7 s_load_b128 s[8:11], s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s8, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s10, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s11, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off global_load_b32 v7, v[0:1], off s_waitcnt vmcnt(1) v_cmpx_lt_i32_e64 v2, v3 s_cbranch_execz .LBB0_5 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v5, 31, v2 v_mov_b32_e32 v4, v2 v_mov_b32_e32 v6, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, v4, s0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo s_mov_b32 s1, 0 v_add_co_u32 v4, vcc_lo, v4, 4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo .p2align 6 .LBB0_3: s_clause 0x1 global_load_b32 v8, v[4:5], off offset:-4 global_load_b32 v10, v[4:5], off v_add_nc_u32_e32 v2, 2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_ge_i32_e64 s0, v2, v3 s_or_b32 s1, s0, s1 s_waitcnt vmcnt(1) v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 
v[8:9], 2, v[8:9] v_add_co_u32 v8, vcc_lo, s10, v8 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v9, vcc_lo, s11, v9, vcc_lo v_add_co_u32 v4, vcc_lo, v4, 8 v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo global_load_b32 v8, v[8:9], off s_waitcnt vmcnt(0) v_mul_lo_u32 v8, v8, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v8, v8, v10 v_sub_nc_u32_e32 v6, v6, v8 s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB0_3 s_or_b32 exec_lo, exec_lo, s1 .LBB0_5: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s2 s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v2, 31, v7 s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_mov_b32 s1, 0 v_add_nc_u32_e32 v3, v7, v2 v_ashrrev_i32_e32 v7, 31, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_xor_b32_e32 v3, v3, v2 v_add_nc_u32_e32 v6, v6, v7 v_xor_b32_e32 v2, v7, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_cvt_f32_u32_e32 v4, v3 v_sub_nc_u32_e32 v5, 0, v3 v_xor_b32_e32 v6, v6, v7 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v4, v4 s_waitcnt_depctr 0xfff v_mul_f32_e32 v4, 0x4f7ffffe, v4 v_cvt_u32_f32_e32 v4, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v5, v4 v_mul_hi_u32 v5, v4, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v4, v5 v_mul_hi_u32 v4, v6, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v4, v3 v_sub_nc_u32_e32 v5, v6, v5 v_add_nc_u32_e32 v6, 1, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v8, v5, v3 v_cmp_ge_u32_e32 vcc_lo, v5, v3 v_dual_cndmask_b32 v5, v5, v8 :: v_dual_cndmask_b32 v4, v4, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_ge_u32_e32 vcc_lo, v5, v3 v_add_nc_u32_e32 v6, 1, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_cndmask_b32_e32 v3, v4, v6, vcc_lo v_xor_b32_e32 v3, v3, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v2, v3, v2 v_cmp_lt_i32_e32 vcc_lo, -1, v2 v_mov_b32_e32 v2, 0 v_cndmask_b32_e64 v3, 1, -1, vcc_lo global_store_b32 v[0:1], v3, off s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv global_load_b32 v0, v2, s[4:5] s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, 0, v0 s_cbranch_vccz .LBB0_7 v_cvt_f32_u32_e32 v0, s6 s_sub_i32 s2, 0, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v0, v0 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_cvt_u32_f32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s0, v0 s_mul_i32 s2, s2, s0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_hi_u32 s2, s0, s2 s_add_i32 s0, s0, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_hi_u32 s0, s0, 0x62d407df s_mul_i32 s0, s0, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_sub_i32 s0, 0x62d407df, s0 s_sub_i32 s2, s0, s6 s_cmp_ge_u32 s0, s6 s_cselect_b32 s0, s2, s0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) s_sub_i32 s2, s0, s6 s_cmp_ge_u32 s0, s6 s_cselect_b32 s0, s2, s0 s_lshl_b64 s[0:1], s[0:1], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s0, s10, s0 s_addc_u32 s1, s11, s1 global_load_b32 v0, v2, s[0:1] s_waitcnt vmcnt(0) v_cmp_eq_u32_e32 vcc_lo, 1, v0 v_cndmask_b32_e64 v0, 1, -1, vcc_lo global_store_b32 v2, v0, s[0:1] .LBB0_7: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18isingAnnealingStepPiS_S_S_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 
.amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z18isingAnnealingStepPiS_S_S_ii, .Lfunc_end0-_Z18isingAnnealingStepPiS_S_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 
.value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18isingAnnealingStepPiS_S_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18isingAnnealingStepPiS_S_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> const int SWEEPS = 100; void printArray(int* arr, int size) { for (int i = 0; i < size; i++) { printf("%d ", arr[i]); } printf("\n"); } int getSize(FILE* fptr) { int size = 0; int temp; while (fscanf(fptr, "%d", &temp) == 1) { size++; } return size; } void getArray(FILE* fptr, int* arr) { int i = 0; while (fscanf(fptr, "%d", &arr[i]) == 1) { i++; } } // CUDA kernel for performing Ising Annealing in parallel __global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){ *count = N; int vertex = blockDim.x*blockIdx.x + threadIdx.x; if(vertex < vertices){ int sigmaI = d_state[vertex]; int H = 0; for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) { int sigmaJ = d_state[d_flat[i]]; int J = d_flat[i + 1]; H -= (J * sigmaI * sigmaJ); } __syncthreads(); hiprandState_t random; hiprand_init(0, 0, 0, &random); int s = H / sigmaI; if (s > 0) { d_state[vertex] = -1; } else if (s < 0) { d_state[vertex] = 1; } else { d_state[vertex] = 1 - 2 * (hiprand(&random) % 2); } __syncthreads(); hiprand_init(0, 0, 0, &random); if(*count > 0){ int index = hiprand(&random) % vertices; if (d_state[index] == 1) { d_state[index] = -1; } else { d_state[index] = 1; } *count--; } } } // utility function to compute Hamiltonian given a vertex // ***not used in the parallel implementation*** int computeHamiltonian(int* flat, int* places, int* state, int vertex) { int sigmaI = state[vertex]; int H = 0; for (int i = places[vertex]; i < places[vertex + 1]; i += 2) { int sigmaJ = state[flat[i]]; int J = flat[i + 1]; H -= (J * sigmaI * sigmaJ); } return H; } int main(){ FILE* fptr; // load array $flat fptr = fopen("flat.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int fsize = getSize(fptr); printf("Number of integers 
present in flat.txt: %d\n", fsize); // read values in text file to array $flat int* flat = (int*)malloc(sizeof(int) * fsize); fseek(fptr, 0, SEEK_SET); getArray(fptr, flat); //printArray(flat, fsize); fclose(fptr); /**********/ // load array $places fptr = fopen("places.txt", "r"); if (fptr == NULL) { printf("Error!"); return -1; } // get number of integers for dynamic memory allocation on host int psize = getSize(fptr); printf("Number of integers present in places.txt: %d\n", psize); // read values in text file to array $places int* places = (int*)malloc(sizeof(int) * psize); fseek(fptr, 0, SEEK_SET); getArray(fptr, places); //printArray(places, psize); fclose(fptr); /**********/ int vertices = psize - 1; int* state = (int*)malloc(sizeof(int) * vertices); printf("Number of vertices: %d\n", vertices); // initialize states randomly for (int i = 0; i < vertices; ++i) { state[i] = 1 - 2 * (rand() % 2); } double initial_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); initial_energy += H; } printf("Initial energy: %f\n", initial_energy / 2); // allocate memory on device int* d_flat; int* d_places; int* d_state; int* count; hipMalloc((void**)&count, sizeof(int)); hipMalloc((void**)&d_flat, fsize * sizeof(int)); hipMalloc((void**)&d_places, psize*sizeof(int)); hipMalloc((void**)&d_state, vertices*sizeof(int)); hipMemcpy(d_flat, flat, fsize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_places, places, psize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_state, state, vertices*sizeof(int), hipMemcpyHostToDevice); int threads = 1024; int blocks = (vertices / 1024) + 1; int N = vertices*0.75; clock_t begin = clock(); for (int i = 0; i < SWEEPS; i++) { isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N); //cudaDeviceSynchronize(); N *= 0.9; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; hipMemcpy(flat, d_flat, fsize*sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(places, d_places, psize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(state, d_state, vertices*sizeof(int), hipMemcpyDeviceToHost); double final_energy = 0; for (int i = 0; i < vertices; i++) { int H = computeHamiltonian(flat, places, state, i); final_energy += H; } printf("Final energy: %f\n", final_energy / 2); printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent); // store results in txt files // $time_spent $initial_energy $final_energy printf("Storing parallel code's results in results.txt...\n"); fptr = fopen("results.txt", "a"); fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy); fclose(fptr); printf("Finished writing to results.txt\n"); // free dynamic memory on host and device free(flat); free(places); free(state); hipFree(d_flat); hipFree(d_places); hipFree(d_state); return 0; }
.text .file "ising_annealing_parallel.hip" .globl _Z10printArrayPii # -- Begin function _Z10printArrayPii .p2align 4, 0x90 .type _Z10printArrayPii,@function _Z10printArrayPii: # @_Z10printArrayPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r15 cmpq %r15, %r14 jne .LBB0_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge movl $10, %edi jmp putchar@PLT # TAILCALL .Lfunc_end0: .size _Z10printArrayPii, .Lfunc_end0-_Z10printArrayPii .cfi_endproc # -- End function .globl _Z7getSizeP8_IO_FILE # -- Begin function _Z7getSizeP8_IO_FILE .p2align 4, 0x90 .type _Z7getSizeP8_IO_FILE,@function _Z7getSizeP8_IO_FILE: # @_Z7getSizeP8_IO_FILE .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $16, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %r14 movl $-1, %ebx leaq 12(%rsp), %r15 .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf incl %ebx cmpl $1, %eax je .LBB1_1 # %bb.2: movl %ebx, %eax addq $16, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z7getSizeP8_IO_FILE, .Lfunc_end1-_Z7getSizeP8_IO_FILE .cfi_endproc # -- End function .globl _Z8getArrayP8_IO_FILEPi # -- Begin function _Z8getArrayP8_IO_FILEPi .p2align 
4, 0x90 .type _Z8getArrayP8_IO_FILEPi,@function _Z8getArrayP8_IO_FILEPi: # @_Z8getArrayP8_IO_FILEPi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rsi, %rbx movq %rdi, %r14 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %rbx, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %rbx cmpl $1, %eax je .LBB2_1 # %bb.2: addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z8getArrayP8_IO_FILEPi, .Lfunc_end2-_Z8getArrayP8_IO_FILEPi .cfi_endproc # -- End function .globl _Z33__device_stub__isingAnnealingStepPiS_S_S_ii # -- Begin function _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .p2align 4, 0x90 .type _Z33__device_stub__isingAnnealingStepPiS_S_S_ii,@function _Z33__device_stub__isingAnnealingStepPiS_S_S_ii: # @_Z33__device_stub__isingAnnealingStepPiS_S_S_ii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z18isingAnnealingStepPiS_S_S_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end3: .size _Z33__device_stub__isingAnnealingStepPiS_S_S_ii, .Lfunc_end3-_Z33__device_stub__isingAnnealingStepPiS_S_S_ii .cfi_endproc 
# -- End function .globl _Z18computeHamiltonianPiS_S_i # -- Begin function _Z18computeHamiltonianPiS_S_i .p2align 4, 0x90 .type _Z18computeHamiltonianPiS_S_i,@function _Z18computeHamiltonianPiS_S_i: # @_Z18computeHamiltonianPiS_S_i .cfi_startproc # %bb.0: movslq %ecx, %r8 movslq (%rsi,%r8,4), %rcx movslq 4(%rsi,%r8,4), %rsi xorl %eax, %eax cmpl %esi, %ecx jge .LBB4_3 # %bb.1: # %.lr.ph.preheader movl (%rdx,%r8,4), %r8d xorl %eax, %eax .p2align 4, 0x90 .LBB4_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movslq (%rdi,%rcx,4), %r9 movl (%rdx,%r9,4), %r9d imull %r8d, %r9d imull 4(%rdi,%rcx,4), %r9d subl %r9d, %eax addq $2, %rcx cmpq %rsi, %rcx jl .LBB4_2 .LBB4_3: # %._crit_edge retq .Lfunc_end4: .size _Z18computeHamiltonianPiS_S_i, .Lfunc_end4-_Z18computeHamiltonianPiS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI5_0: .quad 0x3fe0000000000000 # double 0.5 .LCPI5_1: .quad 0x3fe8000000000000 # double 0.75 .LCPI5_2: .quad 0x3feccccccccccccd # double 0.90000000000000002 .LCPI5_3: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $264, %rsp # imm = 0x108 .cfi_def_cfa_offset 320 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $.L.str.3, %edi movl $.L.str.4, %esi callq fopen testq %rax, %rax je .LBB5_1 # %bb.2: movq %rax, %r14 movq $-4, %r12 movl $-1, %ebx leaq 208(%rsp), %r15 .p2align 4, 0x90 .LBB5_3: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r12 incl %ebx cmpl $1, %eax je .LBB5_3 # %bb.4: # %_Z7getSizeP8_IO_FILE.exit movl 
$.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf movq %r12, %rdi callq malloc movq %rax, %rbx movq %r14, %rdi xorl %esi, %esi xorl %edx, %edx callq fseek movq %rbx, %r15 .p2align 4, 0x90 .LBB5_5: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r15 cmpl $1, %eax je .LBB5_5 # %bb.6: # %_Z8getArrayP8_IO_FILEPi.exit movq %r14, %rdi callq fclose movl $.L.str.7, %edi movl $.L.str.4, %esi callq fopen testq %rax, %rax je .LBB5_1 # %bb.7: movq %rax, %r15 movl $-2, %r14d movq $-4, %rbp leaq 208(%rsp), %r13 .p2align 4, 0x90 .LBB5_8: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r15, %rdi movq %r13, %rdx xorl %eax, %eax callq __isoc23_fscanf incl %r14d addq $4, %rbp cmpl $1, %eax je .LBB5_8 # %bb.9: # %_Z7getSizeP8_IO_FILE.exit94 leal 1(%r14), %esi movl $.L.str.8, %edi movl %esi, 12(%rsp) # 4-byte Spill xorl %eax, %eax callq printf movq %rbp, %rdi callq malloc movq %rax, %r13 movq %r15, %rdi xorl %esi, %esi xorl %edx, %edx callq fseek movq %r13, 80(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB5_10: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r15, %rdi movq %r13, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r13 cmpl $1, %eax je .LBB5_10 # %bb.11: # %_Z8getArrayP8_IO_FILEPi.exit97 movq %r15, %rdi callq fclose leaq -4(%rbp), %rdi movq %rdi, 72(%rsp) # 8-byte Spill callq malloc movq %rax, %r13 movl $.L.str.9, %edi movl %r14d, %esi xorl %eax, %eax callq printf movq %r14, 48(%rsp) # 8-byte Spill movl %r14d, %r9d cmpl $2, 12(%rsp) # 4-byte Folded Reload movq %r9, 88(%rsp) # 8-byte Spill jb .LBB5_14 # %bb.12: # %.lr.ph.preheader xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_13: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movq 88(%rsp), %r9 # 8-byte Reload movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx andl $2147483646, %ecx # imm = 0x7FFFFFFE subl %ecx, %eax addl %eax, %eax movl $1, %ecx subl %eax, %ecx movl %ecx, (%r13,%r14,4) incq %r14 
cmpq %r14, %r9 jne .LBB5_13 .LBB5_14: # %.preheader xorpd %xmm1, %xmm1 cmpl $2, 12(%rsp) # 4-byte Folded Reload movq 80(%rsp), %r15 # 8-byte Reload jb .LBB5_20 # %bb.15: # %.lr.ph114.preheader xorl %eax, %eax jmp .LBB5_16 .p2align 4, 0x90 .LBB5_19: # %_Z18computeHamiltonianPiS_S_i.exit # in Loop: Header=BB5_16 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 addsd %xmm0, %xmm1 incq %rax cmpq %r9, %rax je .LBB5_20 .LBB5_16: # %.lr.ph114 # =>This Loop Header: Depth=1 # Child Loop BB5_18 Depth 2 movslq (%r15,%rax,4), %rcx movslq 4(%r15,%rax,4), %rdx movl $0, %esi cmpl %edx, %ecx jge .LBB5_19 # %bb.17: # %.lr.ph.preheader.i # in Loop: Header=BB5_16 Depth=1 movl (%r13,%rax,4), %edi xorl %esi, %esi .p2align 4, 0x90 .LBB5_18: # %.lr.ph.i # Parent Loop BB5_16 Depth=1 # => This Inner Loop Header: Depth=2 movslq (%rbx,%rcx,4), %r8 movl (%r13,%r8,4), %r8d imull %edi, %r8d imull 4(%rbx,%rcx,4), %r8d subl %r8d, %esi addq $2, %rcx cmpq %rdx, %rcx jl .LBB5_18 jmp .LBB5_19 .LBB5_1: movl $.L.str.5, %edi xorl %eax, %eax callq printf movl $-1, %eax jmp .LBB5_31 .LBB5_20: # %._crit_edge movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero movsd %xmm1, 112(%rsp) # 8-byte Spill mulsd %xmm1, %xmm0 movl $.L.str.10, %edi movb $1, %al callq printf leaq 120(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %rbp, %rsi callq hipMalloc leaq 24(%rsp), %rdi movq 72(%rsp), %r14 # 8-byte Reload movq %r14, %rsi callq hipMalloc movq 40(%rsp), %rdi movq %rbx, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r15, %rsi movq %rbp, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq %r13, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy movq 48(%rsp), %rax # 8-byte Reload leal 1023(%rax), %r14d testl %eax, %eax cmovnsl %eax, %r14d sarl $10, %r14d incl %r14d xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI5_1(%rip), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill callq clock movsd 96(%rsp), %xmm0 # 8-byte 
Reload # xmm0 = mem[0],zero movq %rax, 104(%rsp) # 8-byte Spill movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %r14 movq %r14, 56(%rsp) # 8-byte Spill movl $100, %r15d addq $1024, %rax # imm = 0x400 movq %rax, 16(%rsp) # 8-byte Spill jmp .LBB5_21 .p2align 4, 0x90 .LBB5_23: # in Loop: Header=BB5_21 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd .LCPI5_2(%rip), %xmm0 decl %r15d je .LBB5_24 .LBB5_21: # =>This Inner Loop Header: Depth=1 cvttsd2si %xmm0, %r14d movq 56(%rsp), %rdi # 8-byte Reload movl $1, %esi movq 16(%rsp), %rdx # 8-byte Reload movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_23 # %bb.22: # in Loop: Header=BB5_21 Depth=1 movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq 120(%rsp), %rsi movq %rax, 200(%rsp) movq %rcx, 192(%rsp) movq %rdx, 184(%rsp) movq %rsi, 176(%rsp) movq 48(%rsp), %rax # 8-byte Reload movl %eax, 68(%rsp) movl %r14d, 64(%rsp) leaq 200(%rsp), %rax movq %rax, 208(%rsp) leaq 192(%rsp), %rax movq %rax, 216(%rsp) leaq 184(%rsp), %rax movq %rax, 224(%rsp) leaq 176(%rsp), %rax movq %rax, 232(%rsp) leaq 68(%rsp), %rax movq %rax, 240(%rsp) leaq 64(%rsp), %rax movq %rax, 248(%rsp) leaq 160(%rsp), %rdi leaq 144(%rsp), %rsi leaq 136(%rsp), %rdx leaq 128(%rsp), %rcx callq __hipPopCallConfiguration movq 160(%rsp), %rsi movl 168(%rsp), %edx movq 144(%rsp), %rcx movl 152(%rsp), %r8d movl $_Z18isingAnnealingStepPiS_S_S_ii, %edi leaq 208(%rsp), %r9 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 pushq 144(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB5_23 .LBB5_24: callq clock subq 104(%rsp), %rax # 8-byte Folded Reload xorps %xmm0, %xmm0 cvtsi2sd %rax, %xmm0 divsd .LCPI5_3(%rip), %xmm0 movsd %xmm0, 16(%rsp) # 8-byte Spill movq 40(%rsp), %rsi movq %rbx, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rsi movq 80(%rsp), %r12 # 8-byte Reload movq %r12, %rdi movq %rbp, %rdx movl $2, %ecx callq 
hipMemcpy movq 24(%rsp), %rsi movq %r13, %rdi movq 72(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy xorpd %xmm1, %xmm1 cmpl $2, 12(%rsp) # 4-byte Folded Reload movq 48(%rsp), %r14 # 8-byte Reload movq 88(%rsp), %r9 # 8-byte Reload jb .LBB5_30 # %bb.25: # %.lr.ph121.preheader xorl %eax, %eax jmp .LBB5_26 .p2align 4, 0x90 .LBB5_29: # %_Z18computeHamiltonianPiS_S_i.exit106 # in Loop: Header=BB5_26 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 addsd %xmm0, %xmm1 incq %rax cmpq %r9, %rax je .LBB5_30 .LBB5_26: # %.lr.ph121 # =>This Loop Header: Depth=1 # Child Loop BB5_28 Depth 2 movslq (%r12,%rax,4), %rcx movslq 4(%r12,%rax,4), %rdx movl $0, %esi cmpl %edx, %ecx jge .LBB5_29 # %bb.27: # %.lr.ph.preheader.i101 # in Loop: Header=BB5_26 Depth=1 movl (%r13,%rax,4), %edi xorl %esi, %esi .p2align 4, 0x90 .LBB5_28: # %.lr.ph.i102 # Parent Loop BB5_26 Depth=1 # => This Inner Loop Header: Depth=2 movslq (%rbx,%rcx,4), %r8 movl (%r13,%r8,4), %r8d imull %edi, %r8d imull 4(%rbx,%rcx,4), %r8d subl %r8d, %esi addq $2, %rcx cmpq %rdx, %rcx jl .LBB5_28 jmp .LBB5_29 .LBB5_30: # %._crit_edge122 movsd %xmm1, 56(%rsp) # 8-byte Spill movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero mulsd %xmm1, %xmm0 movl $.L.str.11, %edi movb $1, %al callq printf movl $.L.str.12, %edi movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movb $1, %al callq printf movl $.Lstr, %edi callq puts@PLT movl $.L.str.14, %edi movl $.L.str.15, %esi callq fopen movq %rax, %r15 movl $.L.str.16, %esi movq %rax, %rdi movl %r14d, %edx movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd 112(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero movsd 56(%rsp), %xmm2 # 8-byte Reload # xmm2 = mem[0],zero movb $3, %al callq fprintf movq %r15, %rdi callq fclose movl $.Lstr.1, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r12, %rdi callq free movq %r13, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree xorl %eax, %eax 
.LBB5_31: addq $264, %rsp # imm = 0x108 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18isingAnnealingStepPiS_S_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d " .size .L.str, 4 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d" .size .L.str.2, 3 .type _Z18isingAnnealingStepPiS_S_S_ii,@object # @_Z18isingAnnealingStepPiS_S_S_ii .section .rodata,"a",@progbits .globl _Z18isingAnnealingStepPiS_S_S_ii .p2align 3, 0x0 _Z18isingAnnealingStepPiS_S_S_ii: 
.quad _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .size _Z18isingAnnealingStepPiS_S_S_ii, 8 .type .L.str.3,@object # @.str.3 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.3: .asciz "flat.txt" .size .L.str.3, 9 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "r" .size .L.str.4, 2 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Error!" .size .L.str.5, 7 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Number of integers present in flat.txt: %d\n" .size .L.str.6, 44 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "places.txt" .size .L.str.7, 11 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Number of integers present in places.txt: %d\n" .size .L.str.8, 46 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "Number of vertices: %d\n" .size .L.str.9, 24 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Initial energy: %f\n" .size .L.str.10, 20 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "Final energy: %f\n" .size .L.str.11, 18 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "Time taken for parallel Ising Annealing: %f seconds\n" .size .L.str.12, 53 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "results.txt" .size .L.str.14, 12 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "a" .size .L.str.15, 2 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "Parallel\t%d\t%f\t%f\t%f\n" .size .L.str.16, 22 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18isingAnnealingStepPiS_S_S_ii" .size .L__unnamed_1, 33 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 
.size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Storing parallel code's results in results.txt..." .size .Lstr, 50 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Finished writing to results.txt" .size .Lstr.1, 32 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18isingAnnealingStepPiS_S_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0018d839_00000000-6_ising_annealing_parallel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2278: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2278: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d " .LC1: .string "\n" .text .globl _Z10printArrayPii .type _Z10printArrayPii, @function _Z10printArrayPii: .LFB2271: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 testl %esi, %esi jle .L4 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r12 leaq .LC0(%rip), %rbp .L5: movl (%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L5 .L4: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2271: .size _Z10printArrayPii, .-_Z10printArrayPii .section .rodata.str1.1 .LC2: .string "%d" .text .globl _Z7getSizeP8_IO_FILE .type _Z7getSizeP8_IO_FILE, @function _Z7getSizeP8_IO_FILE: .LFB2272: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movq %rdi, %rbp movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movl $0, %ebx leaq .LC2(%rip), %r12 jmp .L9 .L10: addl $1, %ebx .L9: leaq 4(%rsp), %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT cmpl $1, %eax je .L10 movq 8(%rsp), %rax subq %fs:40, %rax jne .L13 movl %ebx, %eax addq $16, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2272: .size _Z7getSizeP8_IO_FILE, .-_Z7getSizeP8_IO_FILE .globl _Z8getArrayP8_IO_FILEPi .type _Z8getArrayP8_IO_FILEPi, @function _Z8getArrayP8_IO_FILEPi: .LFB2273: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbp movq %rsi, %rbx leaq .LC2(%rip), %r12 .L15: movq %rbx, %rdx movq %r12, %rsi movq %rbp, %rdi movl $0, %eax call __isoc23_fscanf@PLT addq $4, %rbx cmpl $1, %eax je .L15 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2273: .size _Z8getArrayP8_IO_FILEPi, .-_Z8getArrayP8_IO_FILEPi .globl _Z18computeHamiltonianPiS_S_i .type _Z18computeHamiltonianPiS_S_i, @function _Z18computeHamiltonianPiS_S_i: .LFB2274: .cfi_startproc endbr64 movq %rdi, %r9 movq %rdx, %rdi movslq %ecx, %rcx movl (%rdx,%rcx,4), %r8d movl (%rsi,%rcx,4), %r10d movl 4(%rsi,%rcx,4), %edx cmpl %edx, %r10d jge .L21 movslq %r10d, %rcx leaq (%r9,%rcx,4), %rax subl %r10d, %edx subl $1, %edx shrl %edx leal (%rdx,%rdx), %edx addq %rcx, %rdx leaq 8(%r9,%rdx,4), %r9 movl $0, %ecx .L20: movslq (%rax), %rsi movl %r8d, %edx imull 4(%rax), %edx imull (%rdi,%rsi,4), %edx subl %edx, %ecx addq $8, %rax cmpq %r9, %rax jne .L20 .L18: movl %ecx, %eax ret .L21: movl $0, %ecx jmp .L18 .cfi_endproc .LFE2274: .size _Z18computeHamiltonianPiS_S_i, .-_Z18computeHamiltonianPiS_S_i .globl _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii .type _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii, @function _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii: .LFB2300: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) 
movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L27 .L23: movq 168(%rsp), %rax subq %fs:40, %rax jne .L28 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z18isingAnnealingStepPiS_S_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L23 .L28: call __stack_chk_fail@PLT .cfi_endproc .LFE2300: .size _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii, .-_Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii .globl _Z18isingAnnealingStepPiS_S_S_ii .type _Z18isingAnnealingStepPiS_S_S_ii, @function _Z18isingAnnealingStepPiS_S_S_ii: .LFB2301: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2301: .size _Z18isingAnnealingStepPiS_S_S_ii, .-_Z18isingAnnealingStepPiS_S_S_ii .section .rodata.str1.1 .LC4: .string "r" .LC5: .string "flat.txt" .LC6: .string "Error!" 
.section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "Number of integers present in flat.txt: %d\n" .section .rodata.str1.1 .LC8: .string "places.txt" .section .rodata.str1.8 .align 8 .LC9: .string "Number of integers present in places.txt: %d\n" .section .rodata.str1.1 .LC10: .string "Number of vertices: %d\n" .LC12: .string "Initial energy: %f\n" .LC16: .string "Final energy: %f\n" .section .rodata.str1.8 .align 8 .LC17: .string "Time taken for parallel Ising Annealing: %f seconds\n" .align 8 .LC18: .string "Storing parallel code's results in results.txt...\n" .section .rodata.str1.1 .LC19: .string "a" .LC20: .string "results.txt" .LC21: .string "Parallel\t%d\t%f\t%f\t%f\n" .section .rodata.str1.8 .align 8 .LC22: .string "Finished writing to results.txt\n" .text .globl main .type main, @function main: .LFB2275: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq .LC4(%rip), %rsi leaq .LC5(%rip), %rdi call fopen@PLT testq %rax, %rax je .L50 movq %rax, %rbx movq %rax, %rdi call _Z7getSizeP8_IO_FILE movl %eax, %ebp movl %eax, %edx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebp, %rbp leaq 0(,%rbp,4), %rax movq %rax, 8(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r15 movl $0, %edx movl $0, %esi movq %rbx, %rdi call fseek@PLT movq %r15, %rsi movq %rbx, %rdi call _Z8getArrayP8_IO_FILEPi movq %rbx, %rdi call fclose@PLT leaq .LC4(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %rbx testq %rax, %rax je .L51 movq %rax, %rdi call _Z7getSizeP8_IO_FILE movl %eax, %ebp movl %eax, 52(%rsp) movl %eax, %edx leaq .LC9(%rip), %rsi 
movl $2, %edi movl $0, %eax call __printf_chk@PLT movslq %ebp, %rax leaq 0(,%rax,4), %r14 movq %r14, 32(%rsp) movq %r14, %rdi call malloc@PLT movq %rax, %r13 movq %rax, 24(%rsp) movl $0, %edx movl $0, %esi movq %rbx, %rdi call fseek@PLT movq %r13, %rsi movq %rbx, %rdi call _Z8getArrayP8_IO_FILEPi movq %rbx, %rdi call fclose@PLT leal -1(%rbp), %r13d leaq -4(%r14), %rax movq %rax, 40(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, %r14 movl %r13d, %edx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testl %r13d, %r13d jle .L43 movq %r14, %rbx leal -2(%rbp), %eax leaq 4(%r14,%rax,4), %r12 movl $1, %ebp .L36: call rand@PLT movl %eax, %edx shrl $31, %edx addl %edx, %eax andl $1, %eax subl %edx, %eax addl %eax, %eax movl %ebp, %edx subl %eax, %edx movl %edx, (%rbx) addq $4, %rbx cmpq %r12, %rbx jne .L36 movl 52(%rsp), %eax leal -1(%rax), %ebp movl $0, %ebx movq $0x000000000, 16(%rsp) movq 24(%rsp), %r12 .L37: movl %ebx, %ecx movq %r14, %rdx movq %r12, %rsi movq %r15, %rdi call _Z18computeHamiltonianPiS_S_i pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd 16(%rsp), %xmm0 movsd %xmm0, 16(%rsp) addl $1, %ebx cmpl %ebx, %ebp jne .L37 .L35: movsd 16(%rsp), %xmm0 mulsd .LC11(%rip), %xmm0 leaq .LC12(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq 88(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT leaq 64(%rsp), %rdi movq 8(%rsp), %r12 movq %r12, %rsi call cudaMalloc@PLT leaq 72(%rsp), %rdi movq 32(%rsp), %rbp movq %rbp, %rsi call cudaMalloc@PLT leaq 80(%rsp), %rdi movq 40(%rsp), %rbx movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r12, %rdx movq %r15, %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq 24(%rsp), %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT leal 1023(%r13), %r12d testl %r13d, %r13d cmovns %r13d, %r12d sarl $10, %r12d addl $1, %r12d pxor %xmm0, %xmm0 cvtsi2sdl %r13d, %xmm0 mulsd .LC13(%rip), %xmm0 
cvttsd2sil %xmm0, %ebp call clock@PLT movq %rax, 56(%rsp) movl $100, %ebx jmp .L39 .L50: leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L31 .L51: leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %eax jmp .L31 .L43: movq $0x000000000, 16(%rsp) jmp .L35 .L38: pxor %xmm0, %xmm0 cvtsi2sdl %ebp, %xmm0 mulsd .LC14(%rip), %xmm0 cvttsd2sil %xmm0, %ebp subl $1, %ebx je .L52 .L39: movl $1024, 108(%rsp) movl $1, 112(%rsp) movl %r12d, 96(%rsp) movl $1, 100(%rsp) movl $0, %r9d movl $0, %r8d movq 108(%rsp), %rdx movl $1, %ecx movq 96(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L38 movl %ebp, %r9d movl %r13d, %r8d movq 88(%rsp), %rcx movq 80(%rsp), %rdx movq 72(%rsp), %rsi movq 64(%rsp), %rdi call _Z46__device_stub__Z18isingAnnealingStepPiS_S_S_iiPiS_S_S_ii jmp .L38 .L52: call clock@PLT movq 56(%rsp), %rcx subq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC15(%rip), %xmm0 movq %xmm0, %r12 movl $2, %ecx movq 8(%rsp), %rdx movq 64(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movl $2, %ecx movq 32(%rsp), %rdx movq 72(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movq 40(%rsp), %rdx movq 80(%rsp), %rsi movq %r14, %rdi call cudaMemcpy@PLT testl %r13d, %r13d jle .L44 movl 52(%rsp), %ebp subl $1, %ebp movl $0, %ebx movq $0x000000000, 8(%rsp) movq %r12, 32(%rsp) movq 24(%rsp), %r12 .L41: movl %ebx, %ecx movq %r14, %rdx movq %r12, %rsi movq %r15, %rdi call _Z18computeHamiltonianPiS_S_i pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd 8(%rsp), %xmm0 movsd %xmm0, 8(%rsp) addl $1, %ebx cmpl %ebx, %ebp jne .L41 movq 32(%rsp), %r12 .L40: movsd 8(%rsp), %xmm0 mulsd .LC11(%rip), %xmm0 leaq .LC16(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq %r12, %xmm0 leaq .LC17(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC19(%rip), %rsi leaq 
.LC20(%rip), %rdi call fopen@PLT movq %rax, %rbx movsd 8(%rsp), %xmm2 movsd 16(%rsp), %xmm1 movq %r12, %xmm0 movl %r13d, %ecx leaq .LC21(%rip), %rdx movl $2, %esi movq %rax, %rdi movl $3, %eax call __fprintf_chk@PLT movq %rbx, %rdi call fclose@PLT leaq .LC22(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %r15, %rdi call free@PLT movq 24(%rsp), %rdi call free@PLT movq %r14, %rdi call free@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq 80(%rsp), %rdi call cudaFree@PLT movl $0, %eax .L31: movq 120(%rsp), %rdx subq %fs:40, %rdx jne .L53 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movq $0x000000000, 8(%rsp) jmp .L40 .L53: call __stack_chk_fail@PLT .cfi_endproc .LFE2275: .size main, .-main .section .rodata.str1.8 .align 8 .LC23: .string "_Z18isingAnnealingStepPiS_S_S_ii" .section .rodata.str1.1 .LC24: .string "precalc_xorwow_matrix" .LC25: .string "precalc_xorwow_offset_matrix" .LC26: .string "mrg32k3aM1" .LC27: .string "mrg32k3aM2" .LC28: .string "mrg32k3aM1SubSeq" .LC29: .string "mrg32k3aM2SubSeq" .LC30: .string "mrg32k3aM1Seq" .LC31: .string "mrg32k3aM2Seq" .LC32: .string "__cr_lgamma_table" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2303: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC23(%rip), %rdx movq %rdx, %rcx leaq _Z18isingAnnealingStepPiS_S_S_ii(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC24(%rip), %rdx movq %rdx, %rcx leaq _ZL21precalc_xorwow_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $102400, %r9d movl $0, %r8d leaq .LC25(%rip), %rdx movq %rdx, %rcx leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC26(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC27(%rip), %rdx movq %rdx, %rcx leaq _ZL10mrg32k3aM2(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC28(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2016, %r9d movl $0, %r8d leaq .LC29(%rip), %rdx movq %rdx, %rcx leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC30(%rip), %rdx movq %rdx, %rcx leaq _ZL13mrg32k3aM1Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $2304, %r9d movl $0, %r8d leaq .LC31(%rip), %rdx movq %rdx, %rcx leaq 
_ZL13mrg32k3aM2Seq(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $72, %r9d movl $0, %r8d leaq .LC32(%rip), %rdx movq %rdx, %rcx leaq _ZL17__cr_lgamma_table(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2303: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL17__cr_lgamma_table .comm _ZL17__cr_lgamma_table,72,32 .local _ZL13mrg32k3aM2Seq .comm _ZL13mrg32k3aM2Seq,2304,32 .local _ZL13mrg32k3aM1Seq .comm _ZL13mrg32k3aM1Seq,2304,32 .local _ZL16mrg32k3aM2SubSeq .comm _ZL16mrg32k3aM2SubSeq,2016,32 .local _ZL16mrg32k3aM1SubSeq .comm _ZL16mrg32k3aM1SubSeq,2016,32 .local _ZL10mrg32k3aM2 .comm _ZL10mrg32k3aM2,2304,32 .local _ZL10mrg32k3aM1 .comm _ZL10mrg32k3aM1,2304,32 .local _ZL28precalc_xorwow_offset_matrix .comm _ZL28precalc_xorwow_offset_matrix,102400,32 .local _ZL21precalc_xorwow_matrix .comm _ZL21precalc_xorwow_matrix,102400,32 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC11: .long 0 .long 1071644672 .align 8 .LC13: .long 0 .long 1072168960 .align 8 .LC14: .long -858993459 .long 1072483532 .align 8 .LC15: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 
3: .align 8 4:
.text .file "ising_annealing_parallel.hip" .globl _Z10printArrayPii # -- Begin function _Z10printArrayPii .p2align 4, 0x90 .type _Z10printArrayPii,@function _Z10printArrayPii: # @_Z10printArrayPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movl (%rbx,%r15,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r15 cmpq %r15, %r14 jne .LBB0_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB0_4: # %._crit_edge movl $10, %edi jmp putchar@PLT # TAILCALL .Lfunc_end0: .size _Z10printArrayPii, .Lfunc_end0-_Z10printArrayPii .cfi_endproc # -- End function .globl _Z7getSizeP8_IO_FILE # -- Begin function _Z7getSizeP8_IO_FILE .p2align 4, 0x90 .type _Z7getSizeP8_IO_FILE,@function _Z7getSizeP8_IO_FILE: # @_Z7getSizeP8_IO_FILE .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $16, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %r14 movl $-1, %ebx leaq 12(%rsp), %r15 .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf incl %ebx cmpl $1, %eax je .LBB1_1 # %bb.2: movl %ebx, %eax addq $16, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z7getSizeP8_IO_FILE, .Lfunc_end1-_Z7getSizeP8_IO_FILE .cfi_endproc # -- End function .globl _Z8getArrayP8_IO_FILEPi # -- Begin function _Z8getArrayP8_IO_FILEPi .p2align 
4, 0x90 .type _Z8getArrayP8_IO_FILEPi,@function _Z8getArrayP8_IO_FILEPi: # @_Z8getArrayP8_IO_FILEPi .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rsi, %rbx movq %rdi, %r14 .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %rbx, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %rbx cmpl $1, %eax je .LBB2_1 # %bb.2: addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z8getArrayP8_IO_FILEPi, .Lfunc_end2-_Z8getArrayP8_IO_FILEPi .cfi_endproc # -- End function .globl _Z33__device_stub__isingAnnealingStepPiS_S_S_ii # -- Begin function _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .p2align 4, 0x90 .type _Z33__device_stub__isingAnnealingStepPiS_S_S_ii,@function _Z33__device_stub__isingAnnealingStepPiS_S_S_ii: # @_Z33__device_stub__isingAnnealingStepPiS_S_S_ii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z18isingAnnealingStepPiS_S_S_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end3: .size _Z33__device_stub__isingAnnealingStepPiS_S_S_ii, .Lfunc_end3-_Z33__device_stub__isingAnnealingStepPiS_S_S_ii .cfi_endproc 
# -- End function .globl _Z18computeHamiltonianPiS_S_i # -- Begin function _Z18computeHamiltonianPiS_S_i .p2align 4, 0x90 .type _Z18computeHamiltonianPiS_S_i,@function _Z18computeHamiltonianPiS_S_i: # @_Z18computeHamiltonianPiS_S_i .cfi_startproc # %bb.0: movslq %ecx, %r8 movslq (%rsi,%r8,4), %rcx movslq 4(%rsi,%r8,4), %rsi xorl %eax, %eax cmpl %esi, %ecx jge .LBB4_3 # %bb.1: # %.lr.ph.preheader movl (%rdx,%r8,4), %r8d xorl %eax, %eax .p2align 4, 0x90 .LBB4_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movslq (%rdi,%rcx,4), %r9 movl (%rdx,%r9,4), %r9d imull %r8d, %r9d imull 4(%rdi,%rcx,4), %r9d subl %r9d, %eax addq $2, %rcx cmpq %rsi, %rcx jl .LBB4_2 .LBB4_3: # %._crit_edge retq .Lfunc_end4: .size _Z18computeHamiltonianPiS_S_i, .Lfunc_end4-_Z18computeHamiltonianPiS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI5_0: .quad 0x3fe0000000000000 # double 0.5 .LCPI5_1: .quad 0x3fe8000000000000 # double 0.75 .LCPI5_2: .quad 0x3feccccccccccccd # double 0.90000000000000002 .LCPI5_3: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $264, %rsp # imm = 0x108 .cfi_def_cfa_offset 320 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $.L.str.3, %edi movl $.L.str.4, %esi callq fopen testq %rax, %rax je .LBB5_1 # %bb.2: movq %rax, %r14 movq $-4, %r12 movl $-1, %ebx leaq 208(%rsp), %r15 .p2align 4, 0x90 .LBB5_3: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r12 incl %ebx cmpl $1, %eax je .LBB5_3 # %bb.4: # %_Z7getSizeP8_IO_FILE.exit movl 
$.L.str.6, %edi movl %ebx, %esi xorl %eax, %eax callq printf movq %r12, %rdi callq malloc movq %rax, %rbx movq %r14, %rdi xorl %esi, %esi xorl %edx, %edx callq fseek movq %rbx, %r15 .p2align 4, 0x90 .LBB5_5: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r14, %rdi movq %r15, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r15 cmpl $1, %eax je .LBB5_5 # %bb.6: # %_Z8getArrayP8_IO_FILEPi.exit movq %r14, %rdi callq fclose movl $.L.str.7, %edi movl $.L.str.4, %esi callq fopen testq %rax, %rax je .LBB5_1 # %bb.7: movq %rax, %r15 movl $-2, %r14d movq $-4, %rbp leaq 208(%rsp), %r13 .p2align 4, 0x90 .LBB5_8: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r15, %rdi movq %r13, %rdx xorl %eax, %eax callq __isoc23_fscanf incl %r14d addq $4, %rbp cmpl $1, %eax je .LBB5_8 # %bb.9: # %_Z7getSizeP8_IO_FILE.exit94 leal 1(%r14), %esi movl $.L.str.8, %edi movl %esi, 12(%rsp) # 4-byte Spill xorl %eax, %eax callq printf movq %rbp, %rdi callq malloc movq %rax, %r13 movq %r15, %rdi xorl %esi, %esi xorl %edx, %edx callq fseek movq %r13, 80(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB5_10: # =>This Inner Loop Header: Depth=1 movl $.L.str.2, %esi movq %r15, %rdi movq %r13, %rdx xorl %eax, %eax callq __isoc23_fscanf addq $4, %r13 cmpl $1, %eax je .LBB5_10 # %bb.11: # %_Z8getArrayP8_IO_FILEPi.exit97 movq %r15, %rdi callq fclose leaq -4(%rbp), %rdi movq %rdi, 72(%rsp) # 8-byte Spill callq malloc movq %rax, %r13 movl $.L.str.9, %edi movl %r14d, %esi xorl %eax, %eax callq printf movq %r14, 48(%rsp) # 8-byte Spill movl %r14d, %r9d cmpl $2, 12(%rsp) # 4-byte Folded Reload movq %r9, 88(%rsp) # 8-byte Spill jb .LBB5_14 # %bb.12: # %.lr.ph.preheader xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_13: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movq 88(%rsp), %r9 # 8-byte Reload movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx andl $2147483646, %ecx # imm = 0x7FFFFFFE subl %ecx, %eax addl %eax, %eax movl $1, %ecx subl %eax, %ecx movl %ecx, (%r13,%r14,4) incq %r14 
cmpq %r14, %r9 jne .LBB5_13 .LBB5_14: # %.preheader xorpd %xmm1, %xmm1 cmpl $2, 12(%rsp) # 4-byte Folded Reload movq 80(%rsp), %r15 # 8-byte Reload jb .LBB5_20 # %bb.15: # %.lr.ph114.preheader xorl %eax, %eax jmp .LBB5_16 .p2align 4, 0x90 .LBB5_19: # %_Z18computeHamiltonianPiS_S_i.exit # in Loop: Header=BB5_16 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 addsd %xmm0, %xmm1 incq %rax cmpq %r9, %rax je .LBB5_20 .LBB5_16: # %.lr.ph114 # =>This Loop Header: Depth=1 # Child Loop BB5_18 Depth 2 movslq (%r15,%rax,4), %rcx movslq 4(%r15,%rax,4), %rdx movl $0, %esi cmpl %edx, %ecx jge .LBB5_19 # %bb.17: # %.lr.ph.preheader.i # in Loop: Header=BB5_16 Depth=1 movl (%r13,%rax,4), %edi xorl %esi, %esi .p2align 4, 0x90 .LBB5_18: # %.lr.ph.i # Parent Loop BB5_16 Depth=1 # => This Inner Loop Header: Depth=2 movslq (%rbx,%rcx,4), %r8 movl (%r13,%r8,4), %r8d imull %edi, %r8d imull 4(%rbx,%rcx,4), %r8d subl %r8d, %esi addq $2, %rcx cmpq %rdx, %rcx jl .LBB5_18 jmp .LBB5_19 .LBB5_1: movl $.L.str.5, %edi xorl %eax, %eax callq printf movl $-1, %eax jmp .LBB5_31 .LBB5_20: # %._crit_edge movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero movsd %xmm1, 112(%rsp) # 8-byte Spill mulsd %xmm1, %xmm0 movl $.L.str.10, %edi movb $1, %al callq printf leaq 120(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 40(%rsp), %rdi movq %r12, %rsi callq hipMalloc leaq 32(%rsp), %rdi movq %rbp, %rsi callq hipMalloc leaq 24(%rsp), %rdi movq 72(%rsp), %r14 # 8-byte Reload movq %r14, %rsi callq hipMalloc movq 40(%rsp), %rdi movq %rbx, %rsi movq %r12, %rdx movl $1, %ecx callq hipMemcpy movq 32(%rsp), %rdi movq %r15, %rsi movq %rbp, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq %r13, %rsi movq %r14, %rdx movl $1, %ecx callq hipMemcpy movq 48(%rsp), %rax # 8-byte Reload leal 1023(%rax), %r14d testl %eax, %eax cmovnsl %eax, %r14d sarl $10, %r14d incl %r14d xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI5_1(%rip), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill callq clock movsd 96(%rsp), %xmm0 # 8-byte 
Reload # xmm0 = mem[0],zero movq %rax, 104(%rsp) # 8-byte Spill movabsq $4294967296, %rax # imm = 0x100000000 orq %rax, %r14 movq %r14, 56(%rsp) # 8-byte Spill movl $100, %r15d addq $1024, %rax # imm = 0x400 movq %rax, 16(%rsp) # 8-byte Spill jmp .LBB5_21 .p2align 4, 0x90 .LBB5_23: # in Loop: Header=BB5_21 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd .LCPI5_2(%rip), %xmm0 decl %r15d je .LBB5_24 .LBB5_21: # =>This Inner Loop Header: Depth=1 cvttsd2si %xmm0, %r14d movq 56(%rsp), %rdi # 8-byte Reload movl $1, %esi movq 16(%rsp), %rdx # 8-byte Reload movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_23 # %bb.22: # in Loop: Header=BB5_21 Depth=1 movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq 120(%rsp), %rsi movq %rax, 200(%rsp) movq %rcx, 192(%rsp) movq %rdx, 184(%rsp) movq %rsi, 176(%rsp) movq 48(%rsp), %rax # 8-byte Reload movl %eax, 68(%rsp) movl %r14d, 64(%rsp) leaq 200(%rsp), %rax movq %rax, 208(%rsp) leaq 192(%rsp), %rax movq %rax, 216(%rsp) leaq 184(%rsp), %rax movq %rax, 224(%rsp) leaq 176(%rsp), %rax movq %rax, 232(%rsp) leaq 68(%rsp), %rax movq %rax, 240(%rsp) leaq 64(%rsp), %rax movq %rax, 248(%rsp) leaq 160(%rsp), %rdi leaq 144(%rsp), %rsi leaq 136(%rsp), %rdx leaq 128(%rsp), %rcx callq __hipPopCallConfiguration movq 160(%rsp), %rsi movl 168(%rsp), %edx movq 144(%rsp), %rcx movl 152(%rsp), %r8d movl $_Z18isingAnnealingStepPiS_S_S_ii, %edi leaq 208(%rsp), %r9 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 pushq 144(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB5_23 .LBB5_24: callq clock subq 104(%rsp), %rax # 8-byte Folded Reload xorps %xmm0, %xmm0 cvtsi2sd %rax, %xmm0 divsd .LCPI5_3(%rip), %xmm0 movsd %xmm0, 16(%rsp) # 8-byte Spill movq 40(%rsp), %rsi movq %rbx, %rdi movq %r12, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rsi movq 80(%rsp), %r12 # 8-byte Reload movq %r12, %rdi movq %rbp, %rdx movl $2, %ecx callq 
hipMemcpy movq 24(%rsp), %rsi movq %r13, %rdi movq 72(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy xorpd %xmm1, %xmm1 cmpl $2, 12(%rsp) # 4-byte Folded Reload movq 48(%rsp), %r14 # 8-byte Reload movq 88(%rsp), %r9 # 8-byte Reload jb .LBB5_30 # %bb.25: # %.lr.ph121.preheader xorl %eax, %eax jmp .LBB5_26 .p2align 4, 0x90 .LBB5_29: # %_Z18computeHamiltonianPiS_S_i.exit106 # in Loop: Header=BB5_26 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %esi, %xmm0 addsd %xmm0, %xmm1 incq %rax cmpq %r9, %rax je .LBB5_30 .LBB5_26: # %.lr.ph121 # =>This Loop Header: Depth=1 # Child Loop BB5_28 Depth 2 movslq (%r12,%rax,4), %rcx movslq 4(%r12,%rax,4), %rdx movl $0, %esi cmpl %edx, %ecx jge .LBB5_29 # %bb.27: # %.lr.ph.preheader.i101 # in Loop: Header=BB5_26 Depth=1 movl (%r13,%rax,4), %edi xorl %esi, %esi .p2align 4, 0x90 .LBB5_28: # %.lr.ph.i102 # Parent Loop BB5_26 Depth=1 # => This Inner Loop Header: Depth=2 movslq (%rbx,%rcx,4), %r8 movl (%r13,%r8,4), %r8d imull %edi, %r8d imull 4(%rbx,%rcx,4), %r8d subl %r8d, %esi addq $2, %rcx cmpq %rdx, %rcx jl .LBB5_28 jmp .LBB5_29 .LBB5_30: # %._crit_edge122 movsd %xmm1, 56(%rsp) # 8-byte Spill movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero mulsd %xmm1, %xmm0 movl $.L.str.11, %edi movb $1, %al callq printf movl $.L.str.12, %edi movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movb $1, %al callq printf movl $.Lstr, %edi callq puts@PLT movl $.L.str.14, %edi movl $.L.str.15, %esi callq fopen movq %rax, %r15 movl $.L.str.16, %esi movq %rax, %rdi movl %r14d, %edx movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero movsd 112(%rsp), %xmm1 # 8-byte Reload # xmm1 = mem[0],zero movsd 56(%rsp), %xmm2 # 8-byte Reload # xmm2 = mem[0],zero movb $3, %al callq fprintf movq %r15, %rdi callq fclose movl $.Lstr.1, %edi callq puts@PLT movq %rbx, %rdi callq free movq %r12, %rdi callq free movq %r13, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree xorl %eax, %eax 
.LBB5_31: addq $264, %rsp # imm = 0x108 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18isingAnnealingStepPiS_S_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d " .size .L.str, 4 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d" .size .L.str.2, 3 .type _Z18isingAnnealingStepPiS_S_S_ii,@object # @_Z18isingAnnealingStepPiS_S_S_ii .section .rodata,"a",@progbits .globl _Z18isingAnnealingStepPiS_S_S_ii .p2align 3, 0x0 _Z18isingAnnealingStepPiS_S_S_ii: 
.quad _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .size _Z18isingAnnealingStepPiS_S_S_ii, 8 .type .L.str.3,@object # @.str.3 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.3: .asciz "flat.txt" .size .L.str.3, 9 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "r" .size .L.str.4, 2 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Error!" .size .L.str.5, 7 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Number of integers present in flat.txt: %d\n" .size .L.str.6, 44 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "places.txt" .size .L.str.7, 11 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Number of integers present in places.txt: %d\n" .size .L.str.8, 46 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "Number of vertices: %d\n" .size .L.str.9, 24 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "Initial energy: %f\n" .size .L.str.10, 20 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "Final energy: %f\n" .size .L.str.11, 18 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "Time taken for parallel Ising Annealing: %f seconds\n" .size .L.str.12, 53 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "results.txt" .size .L.str.14, 12 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "a" .size .L.str.15, 2 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "Parallel\t%d\t%f\t%f\t%f\n" .size .L.str.16, 22 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z18isingAnnealingStepPiS_S_S_ii" .size .L__unnamed_1, 33 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 
.size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Storing parallel code's results in results.txt..." .size .Lstr, 50 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Finished writing to results.txt" .size .Lstr.1, 32 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z33__device_stub__isingAnnealingStepPiS_S_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z18isingAnnealingStepPiS_S_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
code for sm_80 Function : _Z13sec_min_cuda_iiPfPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e240000002500 */ /*0020*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x001fda0003f06270 */ /*0030*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */ /* 0x001fe20003f06270 */ /*0070*/ BSSY B0, 0xc50 ; /* 0x00000bd000007945 */ /* 0x000fd80003800000 */ /*0080*/ @P0 BRA 0xc40 ; /* 0x00000bb000000947 */ /* 0x000fea0003800000 */ /*0090*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */ /* 0x000fc800078e00ff */ /*00a0*/ IMAD.WIDE R6, R2, R9, c[0x0][0x170] ; /* 0x00005c0002067625 */ /* 0x000fca00078e0209 */ /*00b0*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */ /* 0x000ea8000c1e1900 */ /*00c0*/ LDG.E R4, [R6.64+0x4] ; /* 0x0000040406047981 */ /* 0x000ea4000c1e1900 */ /*00d0*/ ISETP.GT.AND P0, PT, R4, R3, PT ; /* 0x000000030400720c */ /* 0x004fda0003f04270 */ /*00e0*/ @P0 BRA 0x180 ; /* 0x0000009000000947 */ /* 0x000fea0003800000 */ /*00f0*/ IMAD.MOV.U32 R3, RZ, RZ, R0 ; /* 0x000000ffff037224 */ /* 0x000fe400078e0000 */ /*0100*/ IMAD.MOV.U32 R7, RZ, RZ, 0x7f800000 ; /* 0x7f800000ff077424 */ /* 0x000fe400078e00ff */ /*0110*/ IMAD R4, R2, c[0x0][0x164], R3 ; /* 0x0000590002047a24 */ /* 0x001fe200078e0203 */ /*0120*/ IADD3 R3, R3, c[0x0][0x0], RZ ; /* 0x0000000003037a10 */ /* 0x000fc60007ffe0ff */ /*0130*/ IMAD.WIDE R4, R4, R9, c[0x0][0x178] ; /* 0x00005e0004047625 */ /* 0x000fe200078e0209 */ /*0140*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x164], PT ; /* 0x0000590003007a0c */ /* 
0x000fc80003f06270 */ /*0150*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001f2000c101904 */ /*0160*/ @!P0 BRA 0x110 ; /* 0xffffffa000008947 */ /* 0x000fea000383ffff */ /*0170*/ BRA 0xc40 ; /* 0x00000ac000007947 */ /* 0x000fea0003800000 */ /*0180*/ LOP3.LUT R5, RZ, R3, RZ, 0x33, !PT ; /* 0x00000003ff057212 */ /* 0x000fe200078e33ff */ /*0190*/ IMAD.IADD R9, R4.reuse, 0x1, -R3 ; /* 0x0000000104097824 */ /* 0x040fe200078e0a03 */ /*01a0*/ IADD3 R6, R3.reuse, 0x2, RZ ; /* 0x0000000203067810 */ /* 0x040fe20007ffe0ff */ /*01b0*/ IMAD.MOV.U32 R8, RZ, RZ, R0 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0000 */ /*01c0*/ IADD3 R7, R3, 0x3, RZ ; /* 0x0000000303077810 */ /* 0x000fe20007ffe0ff */ /*01d0*/ IMAD.IADD R5, R4, 0x1, R5 ; /* 0x0000000104057824 */ /* 0x000fe200078e0205 */ /*01e0*/ LOP3.LUT R9, R9, 0x3, RZ, 0xc0, !PT ; /* 0x0000000309097812 */ /* 0x000fc800078ec0ff */ /*01f0*/ ISETP.GE.U32.AND P1, PT, R5, 0x3, PT ; /* 0x000000030500780c */ /* 0x000fe40003f26070 */ /*0200*/ IADD3 R5, R3, 0x1, RZ ; /* 0x0000000103057810 */ /* 0x000fe40007ffe0ff */ /*0210*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f05270 */ /*0220*/ IMAD.MOV.U32 R14, RZ, RZ, 0x7f800000 ; /* 0x7f800000ff0e7424 */ /* 0x001fe400078e00ff */ /*0230*/ IMAD.MOV.U32 R11, RZ, RZ, R3 ; /* 0x000000ffff0b7224 */ /* 0x000fd400078e0003 */ /*0240*/ @!P0 BRA 0x3a0 ; /* 0x0000015000008947 */ /* 0x000fea0003800000 */ /*0250*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */ /* 0x000fe400078e00ff */ /*0260*/ IMAD R10, R3, c[0x0][0x164], R8 ; /* 0x00005900030a7a24 */ /* 0x000fc800078e0208 */ /*0270*/ IMAD.WIDE R12, R10, R17, c[0x0][0x168] ; /* 0x00005a000a0c7625 */ /* 0x000fcc00078e0211 */ /*0280*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ea2000c1e1900 */ /*0290*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x000fe20003f05270 */ /*02a0*/ IMAD.MOV.U32 R11, RZ, RZ, R5 ; /* 0x000000ffff0b7224 */ /* 0x000fe200078e0005 */ /*02b0*/ FMNMX R14, R12, +INF 
, PT ; /* 0x7f8000000c0e7809 */ /* 0x004fd60003800000 */ /*02c0*/ @!P0 BRA 0x3a0 ; /* 0x000000d000008947 */ /* 0x000fea0003800000 */ /*02d0*/ ISETP.NE.AND P2, PT, R9, 0x2, PT ; /* 0x000000020900780c */ /* 0x000fe40003f45270 */ /*02e0*/ IADD3 R10, R10, c[0x0][0x164], RZ ; /* 0x000059000a0a7a10 */ /* 0x000fca0007ffe0ff */ /*02f0*/ IMAD.WIDE R12, R10, R17, c[0x0][0x168] ; /* 0x00005a000a0c7625 */ /* 0x000fcc00078e0211 */ /*0300*/ @P2 IADD3 R16, R10, c[0x0][0x164], RZ ; /* 0x000059000a102a10 */ /* 0x000fe20007ffe0ff */ /*0310*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */ /* 0x000ea8000c1e1900 */ /*0320*/ @P2 IMAD.WIDE R16, R16, R17, c[0x0][0x168] ; /* 0x00005a0010102625 */ /* 0x000fcc00078e0211 */ /*0330*/ @P2 LDG.E R17, [R16.64] ; /* 0x0000000410112981 */ /* 0x000ee2000c1e1900 */ /*0340*/ IMAD.MOV.U32 R11, RZ, RZ, R6 ; /* 0x000000ffff0b7224 */ /* 0x000fe400078e0006 */ /*0350*/ @P2 IMAD.MOV.U32 R11, RZ, RZ, R7 ; /* 0x000000ffff0b2224 */ /* 0x000fe200078e0007 */ /*0360*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x004fc80003f0e000 */ /*0370*/ FSEL R14, R13, R14, !P0 ; /* 0x0000000e0d0e7208 */ /* 0x000fc80004000000 */ /*0380*/ @P2 FSETP.GEU.AND P0, PT, R17, R14, PT ; /* 0x0000000e1100220b */ /* 0x008fc80003f0e000 */ /*0390*/ @P2 FSEL R14, R17, R14, !P0 ; /* 0x0000000e110e2208 */ /* 0x000fc80004000000 */ /*03a0*/ @!P1 BRA 0xbd0 ; /* 0x0000082000009947 */ /* 0x000fea0003800000 */ /*03b0*/ IMAD.IADD R10, R4, 0x1, -R11 ; /* 0x00000001040a7824 */ /* 0x000fe200078e0a0b */ /*03c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0f070 */ /*03d0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */ /* 0x000fe400078e00ff */ /*03e0*/ IMAD R17, R11, c[0x0][0x164], R8 ; /* 0x000059000b117a24 */ /* 0x000fe200078e0208 */ /*03f0*/ ISETP.GT.AND P2, PT, R10, 0xc, PT ; /* 0x0000000c0a00780c */ /* 0x000fc60003f44270 */ /*0400*/ IMAD.WIDE R16, R17, R12, c[0x0][0x168] ; /* 0x00005a0011107625 */ /* 0x000fd400078e020c */ 
/*0410*/ @!P2 BRA 0x870 ; /* 0x000004500000a947 */ /* 0x000fea0003800000 */ /*0420*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0430*/ IADD3 R10, R4, -0xc, RZ ; /* 0xfffffff4040a7810 */ /* 0x000fe40007ffe0ff */ /*0440*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */ /* 0x0000a2000c1e1900 */ /*0450*/ IMAD.WIDE R26, R12, c[0x0][0x164], R16 ; /* 0x000059000c1a7a25 */ /* 0x000fca00078e0210 */ /*0460*/ LDG.E R22, [R26.64] ; /* 0x000000041a167981 */ /* 0x0002e2000c1e1900 */ /*0470*/ IMAD.WIDE R18, R12, c[0x0][0x164], R26 ; /* 0x000059000c127a25 */ /* 0x000fca00078e021a */ /*0480*/ LDG.E R29, [R18.64] ; /* 0x00000004121d7981 */ /* 0x000964000c1e1900 */ /*0490*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x010fca00078e0212 */ /*04a0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000962000c1e1900 */ /*04b0*/ IMAD.WIDE R24, R12, c[0x0][0x164], R18 ; /* 0x000059000c187a25 */ /* 0x000fca00078e0212 */ /*04c0*/ LDG.E R13, [R24.64] ; /* 0x00000004180d7981 */ /* 0x000164000c1e1900 */ /*04d0*/ IMAD.WIDE R24, R12, c[0x0][0x164], R24 ; /* 0x000059000c187a25 */ /* 0x001fca00078e0218 */ /*04e0*/ LDG.E R20, [R24.64] ; /* 0x0000000418147981 */ /* 0x000162000c1e1900 */ /*04f0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R24 ; /* 0x000059000c107a25 */ /* 0x000fca00078e0218 */ /*0500*/ LDG.E R28, [R16.64] ; /* 0x00000004101c7981 */ /* 0x000164000c1e1900 */ /*0510*/ IMAD.WIDE R16, R12, c[0x0][0x164], R16 ; /* 0x000059000c107a25 */ /* 0x001fca00078e0210 */ /*0520*/ LDG.E R27, [R16.64] ; /* 0x00000004101b7981 */ /* 0x002162000c1e1900 */ /*0530*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x010fca00078e0210 */ /*0540*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */ /* 0x000324000c1e1900 */ /*0550*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x002fca00078e0212 */ /*0560*/ LDG.E R25, [R18.64] ; /* 0x0000000412197981 */ /* 0x000322000c1e1900 */ /*0570*/ IMAD.WIDE R16, R12, 
c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x001fca00078e0212 */ /*0580*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */ /* 0x000122000c1e1900 */ /*0590*/ FSETP.GEU.AND P2, PT, R15, R14, PT ; /* 0x0000000e0f00720b */ /* 0x004fc80003f4e000 */ /*05a0*/ FSEL R23, R15, R14, !P2 ; /* 0x0000000e0f177208 */ /* 0x000fe20005000000 */ /*05b0*/ IMAD.WIDE R14, R12, c[0x0][0x164], R16 ; /* 0x000059000c0e7a25 */ /* 0x000fc600078e0210 */ /*05c0*/ FSETP.GEU.AND P2, PT, R22, R23, PT ; /* 0x000000171600720b */ /* 0x008fc80003f4e000 */ /*05d0*/ FSEL R22, R22, R23, !P2 ; /* 0x0000001716167208 */ /* 0x000fe40005000000 */ /*05e0*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */ /* 0x0004e2000c1e1900 */ /*05f0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R14 ; /* 0x000059000c107a25 */ /* 0x001fe200078e020e */ /*0600*/ FSETP.GEU.AND P2, PT, R29, R22, PT ; /* 0x000000161d00720b */ /* 0x020fca0003f4e000 */ /*0610*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x002fe200078e0210 */ /*0620*/ LDG.E R14, [R16.64] ; /* 0x00000004100e7981 */ /* 0x0040a2000c1e1900 */ /*0630*/ FSEL R29, R29, R22, !P2 ; /* 0x000000161d1d7208 */ /* 0x000fc60005000000 */ /*0640*/ LDG.E R22, [R18.64] ; /* 0x0000000412167981 */ /* 0x000362000c1e1900 */ /*0650*/ FSETP.GEU.AND P2, PT, R21, R29, PT ; /* 0x0000001d1500720b */ /* 0x000fc80003f4e000 */ /*0660*/ FSEL R29, R21, R29, !P2 ; /* 0x0000001d151d7208 */ /* 0x000fe20005000000 */ /*0670*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x002fc600078e0212 */ /*0680*/ FSETP.GEU.AND P2, PT, R13.reuse, R29.reuse, PT ; /* 0x0000001d0d00720b */ /* 0x0c0fe40003f4e000 */ /*0690*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000f62000c1e1900 */ /*06a0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x001fe200078e0212 */ /*06b0*/ FSEL R29, R13, R29, !P2 ; /* 0x0000001d0d1d7208 */ /* 0x000fc80005000000 */ /*06c0*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */ /* 0x000162000c1e1900 */ /*06d0*/ FSETP.GEU.AND 
P2, PT, R20, R29, PT ; /* 0x0000001d1400720b */ /* 0x000fc80003f4e000 */ /*06e0*/ FSEL R29, R20, R29, !P2 ; /* 0x0000001d141d7208 */ /* 0x000fc80005000000 */ /*06f0*/ FSETP.GEU.AND P2, PT, R28, R29, PT ; /* 0x0000001d1c00720b */ /* 0x000fc80003f4e000 */ /*0700*/ FSEL R28, R28, R29, !P2 ; /* 0x0000001d1c1c7208 */ /* 0x000fc80005000000 */ /*0710*/ FSETP.GEU.AND P2, PT, R27, R28, PT ; /* 0x0000001c1b00720b */ /* 0x000fc80003f4e000 */ /*0720*/ FSEL R27, R27, R28, !P2 ; /* 0x0000001c1b1b7208 */ /* 0x000fc80005000000 */ /*0730*/ FSETP.GEU.AND P2, PT, R26, R27, PT ; /* 0x0000001b1a00720b */ /* 0x010fc80003f4e000 */ /*0740*/ FSEL R26, R26, R27, !P2 ; /* 0x0000001b1a1a7208 */ /* 0x000fc80005000000 */ /*0750*/ FSETP.GEU.AND P2, PT, R25, R26, PT ; /* 0x0000001a1900720b */ /* 0x000fc80003f4e000 */ /*0760*/ FSEL R25, R25, R26, !P2 ; /* 0x0000001a19197208 */ /* 0x000fc80005000000 */ /*0770*/ FSETP.GEU.AND P2, PT, R24, R25, PT ; /* 0x000000191800720b */ /* 0x000fc80003f4e000 */ /*0780*/ FSEL R24, R24, R25, !P2 ; /* 0x0000001918187208 */ /* 0x000fe40005000000 */ /*0790*/ IADD3 R11, R11, 0x10, RZ ; /* 0x000000100b0b7810 */ /* 0x000fc80007ffe0ff */ /*07a0*/ ISETP.GE.AND P3, PT, R11, R10, PT ; /* 0x0000000a0b00720c */ /* 0x000fe20003f66270 */ /*07b0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R16 ; /* 0x000059000c107a25 */ /* 0x001fe200078e0210 */ /*07c0*/ FSETP.GEU.AND P2, PT, R23, R24, PT ; /* 0x000000181700720b */ /* 0x008fc80003f4e000 */ /*07d0*/ FSEL R23, R23, R24, !P2 ; /* 0x0000001817177208 */ /* 0x000fc80005000000 */ /*07e0*/ FSETP.GEU.AND P2, PT, R14, R23, PT ; /* 0x000000170e00720b */ /* 0x004fc80003f4e000 */ /*07f0*/ FSEL R23, R14, R23, !P2 ; /* 0x000000170e177208 */ /* 0x000fc80005000000 */ /*0800*/ FSETP.GEU.AND P2, PT, R22, R23, PT ; /* 0x000000171600720b */ /* 0x020fc80003f4e000 */ /*0810*/ FSEL R22, R22, R23, !P2 ; /* 0x0000001716167208 */ /* 0x000fc80005000000 */ /*0820*/ FSETP.GEU.AND P2, PT, R21, R22, PT ; /* 0x000000161500720b */ /* 0x000fc80003f4e000 */ /*0830*/ FSEL 
R14, R21, R22, !P2 ; /* 0x00000016150e7208 */ /* 0x000fc80005000000 */ /*0840*/ FSETP.GEU.AND P2, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x000fc80003f4e000 */ /*0850*/ FSEL R14, R13, R14, !P2 ; /* 0x0000000e0d0e7208 */ /* 0x000fe20005000000 */ /*0860*/ @!P3 BRA 0x440 ; /* 0xfffffbd00000b947 */ /* 0x000fea000383ffff */ /*0870*/ IMAD.IADD R10, R4, 0x1, -R11 ; /* 0x00000001040a7824 */ /* 0x000fca00078e0a0b */ /*0880*/ ISETP.GT.AND P2, PT, R10, 0x4, PT ; /* 0x000000040a00780c */ /* 0x000fda0003f44270 */ /*0890*/ @!P2 BRA 0xac0 ; /* 0x000002200000a947 */ /* 0x000fea0003800000 */ /*08a0*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */ /* 0x0000a2000c1e1900 */ /*08b0*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x000fca00078e0210 */ /*08c0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */ /* 0x0002e2000c1e1900 */ /*08d0*/ IMAD.WIDE R20, R12, c[0x0][0x164], R18 ; /* 0x000059000c147a25 */ /* 0x000fca00078e0212 */ /*08e0*/ LDG.E R15, [R20.64] ; /* 0x00000004140f7981 */ /* 0x000962000c1e1900 */ /*08f0*/ IMAD.WIDE R22, R12, c[0x0][0x164], R20 ; /* 0x000059000c167a25 */ /* 0x000fca00078e0214 */ /*0900*/ LDG.E R28, [R22.64] ; /* 0x00000004161c7981 */ /* 0x000f62000c1e1900 */ /*0910*/ IMAD.WIDE R24, R12, c[0x0][0x164], R22 ; /* 0x000059000c187a25 */ /* 0x000fca00078e0216 */ /*0920*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */ /* 0x000f62000c1e1900 */ /*0930*/ IMAD.WIDE R26, R12, c[0x0][0x164], R24 ; /* 0x000059000c1a7a25 */ /* 0x000fca00078e0218 */ /*0940*/ LDG.E R20, [R26.64] ; /* 0x000000041a147981 */ /* 0x010f22000c1e1900 */ /*0950*/ IMAD.WIDE R16, R12, c[0x0][0x164], R26 ; /* 0x000059000c107a25 */ /* 0x001fca00078e021a */ /*0960*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */ /* 0x000122000c1e1900 */ /*0970*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x002fca00078e0210 */ /*0980*/ LDG.E R17, [R18.64] ; /* 0x0000000412117981 */ /* 0x001f22000c1e1900 */ /*0990*/ IADD3 R11, R11, 0x8, RZ ; /* 0x000000080b0b7810 */ /* 
0x000fe40007ffe0ff */ /*09a0*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x004fc80003f0e000 */ /*09b0*/ FSEL R13, R13, R14, !P0 ; /* 0x0000000e0d0d7208 */ /* 0x000fc80004000000 */ /*09c0*/ FSETP.GEU.AND P0, PT, R10, R13, PT ; /* 0x0000000d0a00720b */ /* 0x008fc80003f0e000 */ /*09d0*/ FSEL R10, R10, R13, !P0 ; /* 0x0000000d0a0a7208 */ /* 0x000fc80004000000 */ /*09e0*/ FSETP.GEU.AND P0, PT, R15, R10, PT ; /* 0x0000000a0f00720b */ /* 0x020fc80003f0e000 */ /*09f0*/ FSEL R15, R15, R10, !P0 ; /* 0x0000000a0f0f7208 */ /* 0x000fc80004000000 */ /*0a00*/ FSETP.GEU.AND P0, PT, R28, R15, PT ; /* 0x0000000f1c00720b */ /* 0x000fc80003f0e000 */ /*0a10*/ FSEL R28, R28, R15, !P0 ; /* 0x0000000f1c1c7208 */ /* 0x000fc80004000000 */ /*0a20*/ FSETP.GEU.AND P0, PT, R29, R28, PT ; /* 0x0000001c1d00720b */ /* 0x000fc80003f0e000 */ /*0a30*/ FSEL R29, R29, R28, !P0 ; /* 0x0000001c1d1d7208 */ /* 0x000fc80004000000 */ /*0a40*/ FSETP.GEU.AND P0, PT, R20, R29, PT ; /* 0x0000001d1400720b */ /* 0x010fc80003f0e000 */ /*0a50*/ FSEL R20, R20, R29, !P0 ; /* 0x0000001d14147208 */ /* 0x000fc80004000000 */ /*0a60*/ FSETP.GEU.AND P0, PT, R21, R20, PT ; /* 0x000000141500720b */ /* 0x000fc80003f0e000 */ /*0a70*/ FSEL R14, R21, R20, !P0 ; /* 0x00000014150e7208 */ /* 0x000fe40004000000 */ /*0a80*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0a90*/ FSETP.GEU.AND P2, PT, R17, R14, PT ; /* 0x0000000e1100720b */ /* 0x000fc80003f4e000 */ /*0aa0*/ FSEL R14, R17, R14, !P2 ; /* 0x0000000e110e7208 */ /* 0x000fe20005000000 */ /*0ab0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x000fc800078e0212 */ /*0ac0*/ ISETP.LT.OR P0, PT, R11, R4, P0 ; /* 0x000000040b00720c */ /* 0x000fda0000701670 */ /*0ad0*/ @!P0 BRA 0xbd0 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0ae0*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */ /* 0x000ea2000c1e1900 */ /*0af0*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 
0x000fca00078e0210 */ /*0b00*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000ee2000c1e1900 */ /*0b10*/ IMAD.WIDE R10, R12, c[0x0][0x164], R18 ; /* 0x000059000c0a7a25 */ /* 0x000fca00078e0212 */ /*0b20*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */ /* 0x000f22000c1e1900 */ /*0b30*/ IMAD.WIDE R12, R12, c[0x0][0x164], R10 ; /* 0x000059000c0c7a25 */ /* 0x000fcc00078e020a */ /*0b40*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */ /* 0x000f62000c1e1900 */ /*0b50*/ FSETP.GEU.AND P0, PT, R15, R14, PT ; /* 0x0000000e0f00720b */ /* 0x004fc80003f0e000 */ /*0b60*/ FSEL R14, R15, R14, !P0 ; /* 0x0000000e0f0e7208 */ /* 0x000fc80004000000 */ /*0b70*/ FSETP.GEU.AND P0, PT, R21, R14, PT ; /* 0x0000000e1500720b */ /* 0x008fc80003f0e000 */ /*0b80*/ FSEL R14, R21, R14, !P0 ; /* 0x0000000e150e7208 */ /* 0x000fc80004000000 */ /*0b90*/ FSETP.GEU.AND P0, PT, R23, R14, PT ; /* 0x0000000e1700720b */ /* 0x010fc80003f0e000 */ /*0ba0*/ FSEL R14, R23, R14, !P0 ; /* 0x0000000e170e7208 */ /* 0x000fc80004000000 */ /*0bb0*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x020fc80003f0e000 */ /*0bc0*/ FSEL R14, R13, R14, !P0 ; /* 0x0000000e0d0e7208 */ /* 0x000fe40004000000 */ /*0bd0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */ /* 0x000fe400078e00ff */ /*0be0*/ IMAD R10, R2, c[0x0][0x164], R8 ; /* 0x00005900020a7a24 */ /* 0x000fe200078e0208 */ /*0bf0*/ IADD3 R8, R8, c[0x0][0x0], RZ ; /* 0x0000000008087a10 */ /* 0x000fc60007ffe0ff */ /*0c00*/ IMAD.WIDE R10, R10, R11, c[0x0][0x178] ; /* 0x00005e000a0a7625 */ /* 0x000fe200078e020b */ /*0c10*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x164], PT ; /* 0x0000590008007a0c */ /* 0x000fc80003f06270 */ /*0c20*/ STG.E [R10.64], R14 ; /* 0x0000000e0a007986 */ /* 0x0001f2000c101904 */ /*0c30*/ @!P0 BRA 0x210 ; /* 0xfffff5d000008947 */ /* 0x000fea000383ffff */ /*0c40*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0c50*/ IADD3 R2, R2, c[0x0][0xc], RZ ; /* 0x0000030002027a10 */ /* 0x000fc80007ffe0ff */ /*0c60*/ 
ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06270 */ /*0c70*/ @!P0 BRA 0x60 ; /* 0xfffff3e000008947 */ /* 0x000fea000383ffff */ /*0c80*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0c90*/ BRA 0xc90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ca0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ce0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
.file "tmpxft_0017e338_00000000-6_sec_min_cuda_.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ .type _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_, @function _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movl %esi, 24(%rsp) movq %rdx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sec_min_cuda_iiPfPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_, .-_Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ .globl _Z13sec_min_cuda_iiPfPiS_ .type _Z13sec_min_cuda_iiPfPiS_, @function 
_Z13sec_min_cuda_iiPfPiS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z13sec_min_cuda_iiPfPiS_, .-_Z13sec_min_cuda_iiPfPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z13sec_min_cuda_iiPfPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z13sec_min_cuda_iiPfPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z13sec_min_cuda_iiPfPiS_ .globl _Z13sec_min_cuda_iiPfPiS_ .p2align 8 .type _Z13sec_min_cuda_iiPfPiS_,@function _Z13sec_min_cuda_iiPfPiS_: s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_cmp_ge_i32 s15, s3 s_cbranch_scc1 .LBB0_9 s_clause 0x3 s_load_b32 s16, s[0:1], 0x4 s_load_b128 s[4:7], s[0:1], 0x8 s_load_b64 s[10:11], s[0:1], 0x18 s_load_b32 s17, s[0:1], 0x20 s_add_u32 s12, s0, 32 s_mov_b32 s8, s15 s_addc_u32 s13, s1, 0 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e64 s2, s16, v0 s_branch .LBB0_3 .LBB0_2: s_set_inst_prefetch_distance 0x2 s_or_b32 exec_lo, exec_lo, s1 s_add_i32 s8, s17, s8 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s8, s3 s_cbranch_scc1 .LBB0_9 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) s_and_saveexec_b32 s1, s2 s_cbranch_execz .LBB0_2 s_ashr_i32 s9, s8, 31 v_mov_b32_e32 v4, v0 s_lshl_b64 s[14:15], s[8:9], 2 s_mul_i32 s18, s8, s16 s_add_u32 s14, s6, s14 s_addc_u32 s15, s7, s15 s_mov_b32 s20, 0 s_load_b64 s[14:15], s[14:15], 0x0 s_load_b32 s0, s[12:13], 0xc s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[1:2], null, s16, s14, v[0:1] s_cmp_lt_i32 s14, s15 s_cselect_b32 s9, -1, 0 s_and_b32 s19, s0, 0xffff s_set_inst_prefetch_distance 0x1 s_branch .LBB0_6 .p2align 6 .LBB0_5: v_add_nc_u32_e32 v2, s18, v4 v_add_nc_u32_e32 v4, s19, v4 v_add_nc_u32_e32 v1, s19, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v3, 31, v2 v_cmp_le_i32_e32 vcc_lo, s16, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_or_b32 s20, vcc_lo, s20 v_add_co_u32 v2, s0, s10, v2 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, s11, v3, s0 global_store_b32 v[2:3], v5, off s_and_not1_b32 exec_lo, exec_lo, s20 s_cbranch_execz .LBB0_2 .LBB0_6: v_mov_b32_e32 v5, 0x7f800000 s_and_not1_b32 vcc_lo, exec_lo, s9 s_cbranch_vccnz .LBB0_5 v_dual_mov_b32 v5, 0x7f800000 :: v_dual_mov_b32 v2, v1 s_mov_b32 s0, s14 .LBB0_8: s_delay_alu 
instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v3, 31, v2 s_add_i32 s0, s0, 1 s_cmp_ge_i32 s0, s15 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[2:3] v_add_co_u32 v6, vcc_lo, s4, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo global_load_b32 v3, v[6:7], off s_waitcnt vmcnt(0) v_cmp_lt_f32_e32 vcc_lo, v3, v5 v_dual_cndmask_b32 v5, v5, v3 :: v_dual_add_nc_u32 v2, s16, v2 s_cbranch_scc0 .LBB0_8 s_branch .LBB0_5 .LBB0_9: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z13sec_min_cuda_iiPfPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 21 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z13sec_min_cuda_iiPfPiS_, .Lfunc_end0-_Z13sec_min_cuda_iiPfPiS_ .section 
.AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .offset: 4 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z13sec_min_cuda_iiPfPiS_ .private_segment_fixed_size: 0 .sgpr_count: 23 .sgpr_spill_count: 0 .symbol: _Z13sec_min_cuda_iiPfPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: 
- 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
.text .file "sec_min_cuda_.hip" .globl _Z28__device_stub__sec_min_cuda_iiPfPiS_ # -- Begin function _Z28__device_stub__sec_min_cuda_iiPfPiS_ .p2align 4, 0x90 .type _Z28__device_stub__sec_min_cuda_iiPfPiS_,@function _Z28__device_stub__sec_min_cuda_iiPfPiS_: # @_Z28__device_stub__sec_min_cuda_iiPfPiS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movl %esi, (%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) leaq 72(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sec_min_cuda_iiPfPiS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z28__device_stub__sec_min_cuda_iiPfPiS_, .Lfunc_end0-_Z28__device_stub__sec_min_cuda_iiPfPiS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sec_min_cuda_iiPfPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z13sec_min_cuda_iiPfPiS_,@object # @_Z13sec_min_cuda_iiPfPiS_ .section .rodata,"a",@progbits .globl _Z13sec_min_cuda_iiPfPiS_ .p2align 3, 0x0 _Z13sec_min_cuda_iiPfPiS_: .quad _Z28__device_stub__sec_min_cuda_iiPfPiS_ .size _Z13sec_min_cuda_iiPfPiS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z13sec_min_cuda_iiPfPiS_" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z28__device_stub__sec_min_cuda_iiPfPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z13sec_min_cuda_iiPfPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z13sec_min_cuda_iiPfPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e240000002500 */ /*0020*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x001fda0003f06270 */ /*0030*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */ /* 0x001fe20003f06270 */ /*0070*/ BSSY B0, 0xc50 ; /* 0x00000bd000007945 */ /* 0x000fd80003800000 */ /*0080*/ @P0 BRA 0xc40 ; /* 0x00000bb000000947 */ /* 0x000fea0003800000 */ /*0090*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */ /* 0x000fc800078e00ff */ /*00a0*/ IMAD.WIDE R6, R2, R9, c[0x0][0x170] ; /* 0x00005c0002067625 */ /* 0x000fca00078e0209 */ /*00b0*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */ /* 0x000ea8000c1e1900 */ /*00c0*/ LDG.E R4, [R6.64+0x4] ; /* 0x0000040406047981 */ /* 0x000ea4000c1e1900 */ /*00d0*/ ISETP.GT.AND P0, PT, R4, R3, PT ; /* 0x000000030400720c */ /* 0x004fda0003f04270 */ /*00e0*/ @P0 BRA 0x180 ; /* 0x0000009000000947 */ /* 0x000fea0003800000 */ /*00f0*/ IMAD.MOV.U32 R3, RZ, RZ, R0 ; /* 0x000000ffff037224 */ /* 0x000fe400078e0000 */ /*0100*/ IMAD.MOV.U32 R7, RZ, RZ, 0x7f800000 ; /* 0x7f800000ff077424 */ /* 0x000fe400078e00ff */ /*0110*/ IMAD R4, R2, c[0x0][0x164], R3 ; /* 0x0000590002047a24 */ /* 0x001fe200078e0203 */ /*0120*/ IADD3 R3, R3, c[0x0][0x0], RZ ; /* 0x0000000003037a10 */ /* 0x000fc60007ffe0ff */ /*0130*/ IMAD.WIDE R4, R4, R9, c[0x0][0x178] ; /* 0x00005e0004047625 */ /* 0x000fe200078e0209 */ /*0140*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x164], PT ; /* 0x0000590003007a0c */ /* 
0x000fc80003f06270 */ /*0150*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001f2000c101904 */ /*0160*/ @!P0 BRA 0x110 ; /* 0xffffffa000008947 */ /* 0x000fea000383ffff */ /*0170*/ BRA 0xc40 ; /* 0x00000ac000007947 */ /* 0x000fea0003800000 */ /*0180*/ LOP3.LUT R5, RZ, R3, RZ, 0x33, !PT ; /* 0x00000003ff057212 */ /* 0x000fe200078e33ff */ /*0190*/ IMAD.IADD R9, R4.reuse, 0x1, -R3 ; /* 0x0000000104097824 */ /* 0x040fe200078e0a03 */ /*01a0*/ IADD3 R6, R3.reuse, 0x2, RZ ; /* 0x0000000203067810 */ /* 0x040fe20007ffe0ff */ /*01b0*/ IMAD.MOV.U32 R8, RZ, RZ, R0 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0000 */ /*01c0*/ IADD3 R7, R3, 0x3, RZ ; /* 0x0000000303077810 */ /* 0x000fe20007ffe0ff */ /*01d0*/ IMAD.IADD R5, R4, 0x1, R5 ; /* 0x0000000104057824 */ /* 0x000fe200078e0205 */ /*01e0*/ LOP3.LUT R9, R9, 0x3, RZ, 0xc0, !PT ; /* 0x0000000309097812 */ /* 0x000fc800078ec0ff */ /*01f0*/ ISETP.GE.U32.AND P1, PT, R5, 0x3, PT ; /* 0x000000030500780c */ /* 0x000fe40003f26070 */ /*0200*/ IADD3 R5, R3, 0x1, RZ ; /* 0x0000000103057810 */ /* 0x000fe40007ffe0ff */ /*0210*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f05270 */ /*0220*/ IMAD.MOV.U32 R14, RZ, RZ, 0x7f800000 ; /* 0x7f800000ff0e7424 */ /* 0x001fe400078e00ff */ /*0230*/ IMAD.MOV.U32 R11, RZ, RZ, R3 ; /* 0x000000ffff0b7224 */ /* 0x000fd400078e0003 */ /*0240*/ @!P0 BRA 0x3a0 ; /* 0x0000015000008947 */ /* 0x000fea0003800000 */ /*0250*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */ /* 0x000fe400078e00ff */ /*0260*/ IMAD R10, R3, c[0x0][0x164], R8 ; /* 0x00005900030a7a24 */ /* 0x000fc800078e0208 */ /*0270*/ IMAD.WIDE R12, R10, R17, c[0x0][0x168] ; /* 0x00005a000a0c7625 */ /* 0x000fcc00078e0211 */ /*0280*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ea2000c1e1900 */ /*0290*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x000fe20003f05270 */ /*02a0*/ IMAD.MOV.U32 R11, RZ, RZ, R5 ; /* 0x000000ffff0b7224 */ /* 0x000fe200078e0005 */ /*02b0*/ FMNMX R14, R12, +INF 
, PT ; /* 0x7f8000000c0e7809 */ /* 0x004fd60003800000 */ /*02c0*/ @!P0 BRA 0x3a0 ; /* 0x000000d000008947 */ /* 0x000fea0003800000 */ /*02d0*/ ISETP.NE.AND P2, PT, R9, 0x2, PT ; /* 0x000000020900780c */ /* 0x000fe40003f45270 */ /*02e0*/ IADD3 R10, R10, c[0x0][0x164], RZ ; /* 0x000059000a0a7a10 */ /* 0x000fca0007ffe0ff */ /*02f0*/ IMAD.WIDE R12, R10, R17, c[0x0][0x168] ; /* 0x00005a000a0c7625 */ /* 0x000fcc00078e0211 */ /*0300*/ @P2 IADD3 R16, R10, c[0x0][0x164], RZ ; /* 0x000059000a102a10 */ /* 0x000fe20007ffe0ff */ /*0310*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */ /* 0x000ea8000c1e1900 */ /*0320*/ @P2 IMAD.WIDE R16, R16, R17, c[0x0][0x168] ; /* 0x00005a0010102625 */ /* 0x000fcc00078e0211 */ /*0330*/ @P2 LDG.E R17, [R16.64] ; /* 0x0000000410112981 */ /* 0x000ee2000c1e1900 */ /*0340*/ IMAD.MOV.U32 R11, RZ, RZ, R6 ; /* 0x000000ffff0b7224 */ /* 0x000fe400078e0006 */ /*0350*/ @P2 IMAD.MOV.U32 R11, RZ, RZ, R7 ; /* 0x000000ffff0b2224 */ /* 0x000fe200078e0007 */ /*0360*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x004fc80003f0e000 */ /*0370*/ FSEL R14, R13, R14, !P0 ; /* 0x0000000e0d0e7208 */ /* 0x000fc80004000000 */ /*0380*/ @P2 FSETP.GEU.AND P0, PT, R17, R14, PT ; /* 0x0000000e1100220b */ /* 0x008fc80003f0e000 */ /*0390*/ @P2 FSEL R14, R17, R14, !P0 ; /* 0x0000000e110e2208 */ /* 0x000fc80004000000 */ /*03a0*/ @!P1 BRA 0xbd0 ; /* 0x0000082000009947 */ /* 0x000fea0003800000 */ /*03b0*/ IMAD.IADD R10, R4, 0x1, -R11 ; /* 0x00000001040a7824 */ /* 0x000fe200078e0a0b */ /*03c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fe20003f0f070 */ /*03d0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */ /* 0x000fe400078e00ff */ /*03e0*/ IMAD R17, R11, c[0x0][0x164], R8 ; /* 0x000059000b117a24 */ /* 0x000fe200078e0208 */ /*03f0*/ ISETP.GT.AND P2, PT, R10, 0xc, PT ; /* 0x0000000c0a00780c */ /* 0x000fc60003f44270 */ /*0400*/ IMAD.WIDE R16, R17, R12, c[0x0][0x168] ; /* 0x00005a0011107625 */ /* 0x000fd400078e020c */ 
/*0410*/ @!P2 BRA 0x870 ; /* 0x000004500000a947 */ /* 0x000fea0003800000 */ /*0420*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0430*/ IADD3 R10, R4, -0xc, RZ ; /* 0xfffffff4040a7810 */ /* 0x000fe40007ffe0ff */ /*0440*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */ /* 0x0000a2000c1e1900 */ /*0450*/ IMAD.WIDE R26, R12, c[0x0][0x164], R16 ; /* 0x000059000c1a7a25 */ /* 0x000fca00078e0210 */ /*0460*/ LDG.E R22, [R26.64] ; /* 0x000000041a167981 */ /* 0x0002e2000c1e1900 */ /*0470*/ IMAD.WIDE R18, R12, c[0x0][0x164], R26 ; /* 0x000059000c127a25 */ /* 0x000fca00078e021a */ /*0480*/ LDG.E R29, [R18.64] ; /* 0x00000004121d7981 */ /* 0x000964000c1e1900 */ /*0490*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x010fca00078e0212 */ /*04a0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000962000c1e1900 */ /*04b0*/ IMAD.WIDE R24, R12, c[0x0][0x164], R18 ; /* 0x000059000c187a25 */ /* 0x000fca00078e0212 */ /*04c0*/ LDG.E R13, [R24.64] ; /* 0x00000004180d7981 */ /* 0x000164000c1e1900 */ /*04d0*/ IMAD.WIDE R24, R12, c[0x0][0x164], R24 ; /* 0x000059000c187a25 */ /* 0x001fca00078e0218 */ /*04e0*/ LDG.E R20, [R24.64] ; /* 0x0000000418147981 */ /* 0x000162000c1e1900 */ /*04f0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R24 ; /* 0x000059000c107a25 */ /* 0x000fca00078e0218 */ /*0500*/ LDG.E R28, [R16.64] ; /* 0x00000004101c7981 */ /* 0x000164000c1e1900 */ /*0510*/ IMAD.WIDE R16, R12, c[0x0][0x164], R16 ; /* 0x000059000c107a25 */ /* 0x001fca00078e0210 */ /*0520*/ LDG.E R27, [R16.64] ; /* 0x00000004101b7981 */ /* 0x002162000c1e1900 */ /*0530*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x010fca00078e0210 */ /*0540*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */ /* 0x000324000c1e1900 */ /*0550*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x002fca00078e0212 */ /*0560*/ LDG.E R25, [R18.64] ; /* 0x0000000412197981 */ /* 0x000322000c1e1900 */ /*0570*/ IMAD.WIDE R16, R12, 
c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x001fca00078e0212 */ /*0580*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */ /* 0x000122000c1e1900 */ /*0590*/ FSETP.GEU.AND P2, PT, R15, R14, PT ; /* 0x0000000e0f00720b */ /* 0x004fc80003f4e000 */ /*05a0*/ FSEL R23, R15, R14, !P2 ; /* 0x0000000e0f177208 */ /* 0x000fe20005000000 */ /*05b0*/ IMAD.WIDE R14, R12, c[0x0][0x164], R16 ; /* 0x000059000c0e7a25 */ /* 0x000fc600078e0210 */ /*05c0*/ FSETP.GEU.AND P2, PT, R22, R23, PT ; /* 0x000000171600720b */ /* 0x008fc80003f4e000 */ /*05d0*/ FSEL R22, R22, R23, !P2 ; /* 0x0000001716167208 */ /* 0x000fe40005000000 */ /*05e0*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */ /* 0x0004e2000c1e1900 */ /*05f0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R14 ; /* 0x000059000c107a25 */ /* 0x001fe200078e020e */ /*0600*/ FSETP.GEU.AND P2, PT, R29, R22, PT ; /* 0x000000161d00720b */ /* 0x020fca0003f4e000 */ /*0610*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x002fe200078e0210 */ /*0620*/ LDG.E R14, [R16.64] ; /* 0x00000004100e7981 */ /* 0x0040a2000c1e1900 */ /*0630*/ FSEL R29, R29, R22, !P2 ; /* 0x000000161d1d7208 */ /* 0x000fc60005000000 */ /*0640*/ LDG.E R22, [R18.64] ; /* 0x0000000412167981 */ /* 0x000362000c1e1900 */ /*0650*/ FSETP.GEU.AND P2, PT, R21, R29, PT ; /* 0x0000001d1500720b */ /* 0x000fc80003f4e000 */ /*0660*/ FSEL R29, R21, R29, !P2 ; /* 0x0000001d151d7208 */ /* 0x000fe20005000000 */ /*0670*/ IMAD.WIDE R18, R12, c[0x0][0x164], R18 ; /* 0x000059000c127a25 */ /* 0x002fc600078e0212 */ /*0680*/ FSETP.GEU.AND P2, PT, R13.reuse, R29.reuse, PT ; /* 0x0000001d0d00720b */ /* 0x0c0fe40003f4e000 */ /*0690*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000f62000c1e1900 */ /*06a0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x001fe200078e0212 */ /*06b0*/ FSEL R29, R13, R29, !P2 ; /* 0x0000001d0d1d7208 */ /* 0x000fc80005000000 */ /*06c0*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */ /* 0x000162000c1e1900 */ /*06d0*/ FSETP.GEU.AND 
P2, PT, R20, R29, PT ; /* 0x0000001d1400720b */ /* 0x000fc80003f4e000 */ /*06e0*/ FSEL R29, R20, R29, !P2 ; /* 0x0000001d141d7208 */ /* 0x000fc80005000000 */ /*06f0*/ FSETP.GEU.AND P2, PT, R28, R29, PT ; /* 0x0000001d1c00720b */ /* 0x000fc80003f4e000 */ /*0700*/ FSEL R28, R28, R29, !P2 ; /* 0x0000001d1c1c7208 */ /* 0x000fc80005000000 */ /*0710*/ FSETP.GEU.AND P2, PT, R27, R28, PT ; /* 0x0000001c1b00720b */ /* 0x000fc80003f4e000 */ /*0720*/ FSEL R27, R27, R28, !P2 ; /* 0x0000001c1b1b7208 */ /* 0x000fc80005000000 */ /*0730*/ FSETP.GEU.AND P2, PT, R26, R27, PT ; /* 0x0000001b1a00720b */ /* 0x010fc80003f4e000 */ /*0740*/ FSEL R26, R26, R27, !P2 ; /* 0x0000001b1a1a7208 */ /* 0x000fc80005000000 */ /*0750*/ FSETP.GEU.AND P2, PT, R25, R26, PT ; /* 0x0000001a1900720b */ /* 0x000fc80003f4e000 */ /*0760*/ FSEL R25, R25, R26, !P2 ; /* 0x0000001a19197208 */ /* 0x000fc80005000000 */ /*0770*/ FSETP.GEU.AND P2, PT, R24, R25, PT ; /* 0x000000191800720b */ /* 0x000fc80003f4e000 */ /*0780*/ FSEL R24, R24, R25, !P2 ; /* 0x0000001918187208 */ /* 0x000fe40005000000 */ /*0790*/ IADD3 R11, R11, 0x10, RZ ; /* 0x000000100b0b7810 */ /* 0x000fc80007ffe0ff */ /*07a0*/ ISETP.GE.AND P3, PT, R11, R10, PT ; /* 0x0000000a0b00720c */ /* 0x000fe20003f66270 */ /*07b0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R16 ; /* 0x000059000c107a25 */ /* 0x001fe200078e0210 */ /*07c0*/ FSETP.GEU.AND P2, PT, R23, R24, PT ; /* 0x000000181700720b */ /* 0x008fc80003f4e000 */ /*07d0*/ FSEL R23, R23, R24, !P2 ; /* 0x0000001817177208 */ /* 0x000fc80005000000 */ /*07e0*/ FSETP.GEU.AND P2, PT, R14, R23, PT ; /* 0x000000170e00720b */ /* 0x004fc80003f4e000 */ /*07f0*/ FSEL R23, R14, R23, !P2 ; /* 0x000000170e177208 */ /* 0x000fc80005000000 */ /*0800*/ FSETP.GEU.AND P2, PT, R22, R23, PT ; /* 0x000000171600720b */ /* 0x020fc80003f4e000 */ /*0810*/ FSEL R22, R22, R23, !P2 ; /* 0x0000001716167208 */ /* 0x000fc80005000000 */ /*0820*/ FSETP.GEU.AND P2, PT, R21, R22, PT ; /* 0x000000161500720b */ /* 0x000fc80003f4e000 */ /*0830*/ FSEL 
R14, R21, R22, !P2 ; /* 0x00000016150e7208 */ /* 0x000fc80005000000 */ /*0840*/ FSETP.GEU.AND P2, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x000fc80003f4e000 */ /*0850*/ FSEL R14, R13, R14, !P2 ; /* 0x0000000e0d0e7208 */ /* 0x000fe20005000000 */ /*0860*/ @!P3 BRA 0x440 ; /* 0xfffffbd00000b947 */ /* 0x000fea000383ffff */ /*0870*/ IMAD.IADD R10, R4, 0x1, -R11 ; /* 0x00000001040a7824 */ /* 0x000fca00078e0a0b */ /*0880*/ ISETP.GT.AND P2, PT, R10, 0x4, PT ; /* 0x000000040a00780c */ /* 0x000fda0003f44270 */ /*0890*/ @!P2 BRA 0xac0 ; /* 0x000002200000a947 */ /* 0x000fea0003800000 */ /*08a0*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */ /* 0x0000a2000c1e1900 */ /*08b0*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x000fca00078e0210 */ /*08c0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */ /* 0x0002e2000c1e1900 */ /*08d0*/ IMAD.WIDE R20, R12, c[0x0][0x164], R18 ; /* 0x000059000c147a25 */ /* 0x000fca00078e0212 */ /*08e0*/ LDG.E R15, [R20.64] ; /* 0x00000004140f7981 */ /* 0x000962000c1e1900 */ /*08f0*/ IMAD.WIDE R22, R12, c[0x0][0x164], R20 ; /* 0x000059000c167a25 */ /* 0x000fca00078e0214 */ /*0900*/ LDG.E R28, [R22.64] ; /* 0x00000004161c7981 */ /* 0x000f62000c1e1900 */ /*0910*/ IMAD.WIDE R24, R12, c[0x0][0x164], R22 ; /* 0x000059000c187a25 */ /* 0x000fca00078e0216 */ /*0920*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */ /* 0x000f62000c1e1900 */ /*0930*/ IMAD.WIDE R26, R12, c[0x0][0x164], R24 ; /* 0x000059000c1a7a25 */ /* 0x000fca00078e0218 */ /*0940*/ LDG.E R20, [R26.64] ; /* 0x000000041a147981 */ /* 0x010f22000c1e1900 */ /*0950*/ IMAD.WIDE R16, R12, c[0x0][0x164], R26 ; /* 0x000059000c107a25 */ /* 0x001fca00078e021a */ /*0960*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */ /* 0x000122000c1e1900 */ /*0970*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 0x002fca00078e0210 */ /*0980*/ LDG.E R17, [R18.64] ; /* 0x0000000412117981 */ /* 0x001f22000c1e1900 */ /*0990*/ IADD3 R11, R11, 0x8, RZ ; /* 0x000000080b0b7810 */ /* 
0x000fe40007ffe0ff */ /*09a0*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x004fc80003f0e000 */ /*09b0*/ FSEL R13, R13, R14, !P0 ; /* 0x0000000e0d0d7208 */ /* 0x000fc80004000000 */ /*09c0*/ FSETP.GEU.AND P0, PT, R10, R13, PT ; /* 0x0000000d0a00720b */ /* 0x008fc80003f0e000 */ /*09d0*/ FSEL R10, R10, R13, !P0 ; /* 0x0000000d0a0a7208 */ /* 0x000fc80004000000 */ /*09e0*/ FSETP.GEU.AND P0, PT, R15, R10, PT ; /* 0x0000000a0f00720b */ /* 0x020fc80003f0e000 */ /*09f0*/ FSEL R15, R15, R10, !P0 ; /* 0x0000000a0f0f7208 */ /* 0x000fc80004000000 */ /*0a00*/ FSETP.GEU.AND P0, PT, R28, R15, PT ; /* 0x0000000f1c00720b */ /* 0x000fc80003f0e000 */ /*0a10*/ FSEL R28, R28, R15, !P0 ; /* 0x0000000f1c1c7208 */ /* 0x000fc80004000000 */ /*0a20*/ FSETP.GEU.AND P0, PT, R29, R28, PT ; /* 0x0000001c1d00720b */ /* 0x000fc80003f0e000 */ /*0a30*/ FSEL R29, R29, R28, !P0 ; /* 0x0000001c1d1d7208 */ /* 0x000fc80004000000 */ /*0a40*/ FSETP.GEU.AND P0, PT, R20, R29, PT ; /* 0x0000001d1400720b */ /* 0x010fc80003f0e000 */ /*0a50*/ FSEL R20, R20, R29, !P0 ; /* 0x0000001d14147208 */ /* 0x000fc80004000000 */ /*0a60*/ FSETP.GEU.AND P0, PT, R21, R20, PT ; /* 0x000000141500720b */ /* 0x000fc80003f0e000 */ /*0a70*/ FSEL R14, R21, R20, !P0 ; /* 0x00000014150e7208 */ /* 0x000fe40004000000 */ /*0a80*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0a90*/ FSETP.GEU.AND P2, PT, R17, R14, PT ; /* 0x0000000e1100720b */ /* 0x000fc80003f4e000 */ /*0aa0*/ FSEL R14, R17, R14, !P2 ; /* 0x0000000e110e7208 */ /* 0x000fe20005000000 */ /*0ab0*/ IMAD.WIDE R16, R12, c[0x0][0x164], R18 ; /* 0x000059000c107a25 */ /* 0x000fc800078e0212 */ /*0ac0*/ ISETP.LT.OR P0, PT, R11, R4, P0 ; /* 0x000000040b00720c */ /* 0x000fda0000701670 */ /*0ad0*/ @!P0 BRA 0xbd0 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0ae0*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */ /* 0x000ea2000c1e1900 */ /*0af0*/ IMAD.WIDE R18, R12, c[0x0][0x164], R16 ; /* 0x000059000c127a25 */ /* 
0x000fca00078e0210 */ /*0b00*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */ /* 0x000ee2000c1e1900 */ /*0b10*/ IMAD.WIDE R10, R12, c[0x0][0x164], R18 ; /* 0x000059000c0a7a25 */ /* 0x000fca00078e0212 */ /*0b20*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */ /* 0x000f22000c1e1900 */ /*0b30*/ IMAD.WIDE R12, R12, c[0x0][0x164], R10 ; /* 0x000059000c0c7a25 */ /* 0x000fcc00078e020a */ /*0b40*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */ /* 0x000f62000c1e1900 */ /*0b50*/ FSETP.GEU.AND P0, PT, R15, R14, PT ; /* 0x0000000e0f00720b */ /* 0x004fc80003f0e000 */ /*0b60*/ FSEL R14, R15, R14, !P0 ; /* 0x0000000e0f0e7208 */ /* 0x000fc80004000000 */ /*0b70*/ FSETP.GEU.AND P0, PT, R21, R14, PT ; /* 0x0000000e1500720b */ /* 0x008fc80003f0e000 */ /*0b80*/ FSEL R14, R21, R14, !P0 ; /* 0x0000000e150e7208 */ /* 0x000fc80004000000 */ /*0b90*/ FSETP.GEU.AND P0, PT, R23, R14, PT ; /* 0x0000000e1700720b */ /* 0x010fc80003f0e000 */ /*0ba0*/ FSEL R14, R23, R14, !P0 ; /* 0x0000000e170e7208 */ /* 0x000fc80004000000 */ /*0bb0*/ FSETP.GEU.AND P0, PT, R13, R14, PT ; /* 0x0000000e0d00720b */ /* 0x020fc80003f0e000 */ /*0bc0*/ FSEL R14, R13, R14, !P0 ; /* 0x0000000e0d0e7208 */ /* 0x000fe40004000000 */ /*0bd0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */ /* 0x000fe400078e00ff */ /*0be0*/ IMAD R10, R2, c[0x0][0x164], R8 ; /* 0x00005900020a7a24 */ /* 0x000fe200078e0208 */ /*0bf0*/ IADD3 R8, R8, c[0x0][0x0], RZ ; /* 0x0000000008087a10 */ /* 0x000fc60007ffe0ff */ /*0c00*/ IMAD.WIDE R10, R10, R11, c[0x0][0x178] ; /* 0x00005e000a0a7625 */ /* 0x000fe200078e020b */ /*0c10*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x164], PT ; /* 0x0000590008007a0c */ /* 0x000fc80003f06270 */ /*0c20*/ STG.E [R10.64], R14 ; /* 0x0000000e0a007986 */ /* 0x0001f2000c101904 */ /*0c30*/ @!P0 BRA 0x210 ; /* 0xfffff5d000008947 */ /* 0x000fea000383ffff */ /*0c40*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0c50*/ IADD3 R2, R2, c[0x0][0xc], RZ ; /* 0x0000030002027a10 */ /* 0x000fc80007ffe0ff */ /*0c60*/ 
ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06270 */ /*0c70*/ @!P0 BRA 0x60 ; /* 0xfffff3e000008947 */ /* 0x000fea000383ffff */ /*0c80*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0c90*/ BRA 0xc90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0ca0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ce0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z13sec_min_cuda_iiPfPiS_ .globl _Z13sec_min_cuda_iiPfPiS_ .p2align 8 .type _Z13sec_min_cuda_iiPfPiS_,@function _Z13sec_min_cuda_iiPfPiS_: s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_cmp_ge_i32 s15, s3 s_cbranch_scc1 .LBB0_9 s_clause 0x3 s_load_b32 s16, s[0:1], 0x4 s_load_b128 s[4:7], s[0:1], 0x8 s_load_b64 s[10:11], s[0:1], 0x18 s_load_b32 s17, s[0:1], 0x20 s_add_u32 s12, s0, 32 s_mov_b32 s8, s15 s_addc_u32 s13, s1, 0 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e64 s2, s16, v0 s_branch .LBB0_3 .LBB0_2: s_set_inst_prefetch_distance 0x2 s_or_b32 exec_lo, exec_lo, s1 s_add_i32 s8, s17, s8 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s8, s3 s_cbranch_scc1 .LBB0_9 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) s_and_saveexec_b32 s1, s2 s_cbranch_execz .LBB0_2 s_ashr_i32 s9, s8, 31 v_mov_b32_e32 v4, v0 s_lshl_b64 s[14:15], s[8:9], 2 s_mul_i32 s18, s8, s16 s_add_u32 s14, s6, s14 s_addc_u32 s15, s7, s15 s_mov_b32 s20, 0 s_load_b64 s[14:15], s[14:15], 0x0 s_load_b32 s0, s[12:13], 0xc s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[1:2], null, s16, s14, v[0:1] s_cmp_lt_i32 s14, s15 s_cselect_b32 s9, -1, 0 s_and_b32 s19, s0, 0xffff s_set_inst_prefetch_distance 0x1 s_branch .LBB0_6 .p2align 6 .LBB0_5: v_add_nc_u32_e32 v2, s18, v4 v_add_nc_u32_e32 v4, s19, v4 v_add_nc_u32_e32 v1, s19, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v3, 31, v2 v_cmp_le_i32_e32 vcc_lo, s16, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_or_b32 s20, vcc_lo, s20 v_add_co_u32 v2, s0, s10, v2 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, s11, v3, s0 global_store_b32 v[2:3], v5, off s_and_not1_b32 exec_lo, exec_lo, s20 s_cbranch_execz .LBB0_2 .LBB0_6: v_mov_b32_e32 v5, 0x7f800000 s_and_not1_b32 vcc_lo, exec_lo, s9 s_cbranch_vccnz .LBB0_5 v_dual_mov_b32 v5, 0x7f800000 :: v_dual_mov_b32 v2, v1 s_mov_b32 s0, s14 .LBB0_8: s_delay_alu 
instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v3, 31, v2 s_add_i32 s0, s0, 1 s_cmp_ge_i32 s0, s15 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[2:3] v_add_co_u32 v6, vcc_lo, s4, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo global_load_b32 v3, v[6:7], off s_waitcnt vmcnt(0) v_cmp_lt_f32_e32 vcc_lo, v3, v5 v_dual_cndmask_b32 v5, v5, v3 :: v_dual_add_nc_u32 v2, s16, v2 s_cbranch_scc0 .LBB0_8 s_branch .LBB0_5 .LBB0_9: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z13sec_min_cuda_iiPfPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 21 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z13sec_min_cuda_iiPfPiS_, .Lfunc_end0-_Z13sec_min_cuda_iiPfPiS_ .section 
.AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .offset: 4 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z13sec_min_cuda_iiPfPiS_ .private_segment_fixed_size: 0 .sgpr_count: 23 .sgpr_spill_count: 0 .symbol: _Z13sec_min_cuda_iiPfPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: 
- 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0017e338_00000000-6_sec_min_cuda_.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ .type _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_, @function _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movl %esi, 24(%rsp) movq %rdx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 24(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z13sec_min_cuda_iiPfPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_, .-_Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ .globl _Z13sec_min_cuda_iiPfPiS_ .type _Z13sec_min_cuda_iiPfPiS_, @function 
_Z13sec_min_cuda_iiPfPiS_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z13sec_min_cuda_iiPfPiS_iiPfPiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z13sec_min_cuda_iiPfPiS_, .-_Z13sec_min_cuda_iiPfPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z13sec_min_cuda_iiPfPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z13sec_min_cuda_iiPfPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "sec_min_cuda_.hip" .globl _Z28__device_stub__sec_min_cuda_iiPfPiS_ # -- Begin function _Z28__device_stub__sec_min_cuda_iiPfPiS_ .p2align 4, 0x90 .type _Z28__device_stub__sec_min_cuda_iiPfPiS_,@function _Z28__device_stub__sec_min_cuda_iiPfPiS_: # @_Z28__device_stub__sec_min_cuda_iiPfPiS_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movl %esi, (%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) leaq 72(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z13sec_min_cuda_iiPfPiS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z28__device_stub__sec_min_cuda_iiPfPiS_, .Lfunc_end0-_Z28__device_stub__sec_min_cuda_iiPfPiS_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z13sec_min_cuda_iiPfPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function 
.p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z13sec_min_cuda_iiPfPiS_,@object # @_Z13sec_min_cuda_iiPfPiS_ .section .rodata,"a",@progbits .globl _Z13sec_min_cuda_iiPfPiS_ .p2align 3, 0x0 _Z13sec_min_cuda_iiPfPiS_: .quad _Z28__device_stub__sec_min_cuda_iiPfPiS_ .size _Z13sec_min_cuda_iiPfPiS_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z13sec_min_cuda_iiPfPiS_" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z28__device_stub__sec_min_cuda_iiPfPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z13sec_min_cuda_iiPfPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); cudaMalloc( (void**)&Adefault,sizearr); cudaMalloc( (void**)&B,sizearr); cudaMemcpy( Adefault, A, sizearr, cudaMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); cudaMemcpy(size, B, sizearr, cudaMemcpyDeviceToHost); cudaFree(Adefault); cudaFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
code for sm_80 Function : _Z19vecMatrixTransposedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*0040*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc60000000a00 */ /*0070*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002600 */ /*0080*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fe400078e0203 */ /*00a0*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x002fc800078e0202 */ /*00b0*/ IMAD R2, R5, UR4, R0 ; /* 0x0000000405027c24 */ /* 0x000fc8000f8e0200 */ /*00c0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0204 */ /*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD R5, R0, UR4, R5 ; /* 0x0000000400057c24 */ /* 0x000fc8000f8e0205 */ /*00f0*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fca00078e0204 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101906 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); cudaMalloc( (void**)&Adefault,sizearr); cudaMalloc( (void**)&B,sizearr); cudaMemcpy( Adefault, A, sizearr, cudaMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); cudaMemcpy(size, B, sizearr, cudaMemcpyDeviceToHost); cudaFree(Adefault); cudaFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
.file "tmpxft_00094a8b_00000000-6_cuda5.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ .type _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_, @function _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z19vecMatrixTransposedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_, .-_Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ .globl _Z19vecMatrixTransposedPiS_ .type _Z19vecMatrixTransposedPiS_, @function _Z19vecMatrixTransposedPiS_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: 
.size _Z19vecMatrixTransposedPiS_, .-_Z19vecMatrixTransposedPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%5d " .LC1: .string "\n" .LC2: .string "Result\n" .LC3: .string "%3d " .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $2120, %rsp .cfi_def_cfa_offset 2176 movq %fs:40, %rax movq %rax, 2104(%rsp) xorl %eax, %eax leaq 1136(%rsp), %r13 movl $0, %r15d leaq .LC0(%rip), %r14 .L12: movl %r15d, %r12d imull %r15d, %r12d addl $1, %r12d leaq -64(%r13), %rbp movl %r12d, %ebx .L13: movl %ebx, 0(%rbp) movl %ebx, %edx movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl %r12d, %ebx addq $4, %rbp cmpq %r13, %rbp jne .L13 addl $1, %r15d addq $64, %r13 cmpl $16, %r15d jne .L12 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $1024, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $1024, %esi call cudaMalloc@PLT leaq 1072(%rsp), %rsi movl $1, %ecx movl $1024, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, 32(%rsp) movl $2, 36(%rsp) movl $2, 40(%rsp) movl $1, 44(%rsp) movl $8, 24(%rsp) movl $8, 28(%rsp) movl $0, %r9d movl $0, %r8d movq 24(%rsp), %rdx movl $1, %ecx movq 36(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L22 .L15: leaq 48(%rsp), %rbx movl $2, %ecx movl $1024, %edx movq 16(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 1072(%rsp), %r13 movl $0, %ebp leaq .LC3(%rip), %r12 leaq .LC1(%rip), %r14 jmp .L17 .L22: movq 16(%rsp), %rsi movq 8(%rsp), 
%rdi call _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ jmp .L15 .L16: addq $4, %rbx cmpq %r13, %rbx je .L23 .L17: addl $1, %ebp movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $16, %ebp jne .L16 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %ebp jmp .L16 .L23: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 2104(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $2120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z19vecMatrixTransposedPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z19vecMatrixTransposedPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad 
fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); cudaMalloc( (void**)&Adefault,sizearr); cudaMalloc( (void**)&B,sizearr); cudaMemcpy( Adefault, A, sizearr, cudaMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); cudaMemcpy(size, B, sizearr, cudaMemcpyDeviceToHost); cudaFree(Adefault); cudaFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); hipMalloc( (void**)&Adefault,sizearr); hipMalloc( (void**)&B,sizearr); hipMemcpy( Adefault, A, sizearr, hipMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); hipMemcpy(size, B, sizearr, hipMemcpyDeviceToHost); hipFree(Adefault); hipFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); hipMalloc( (void**)&Adefault,sizearr); hipMalloc( (void**)&B,sizearr); hipMemcpy( Adefault, A, sizearr, hipMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); hipMemcpy(size, B, sizearr, hipMemcpyDeviceToHost); hipFree(Adefault); hipFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z19vecMatrixTransposedPiS_ .globl _Z19vecMatrixTransposedPiS_ .p2align 8 .type _Z19vecMatrixTransposedPiS_,@function _Z19vecMatrixTransposedPiS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s4, s[0:1], 0x10 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_lshl_b32 s4, s4, 3 s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, v3, s4, v[2:3] v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v4, v[0:1], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[0:1], null, v2, s4, v[3:4] v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_store_b32 v[0:1], v4, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z19vecMatrixTransposedPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z19vecMatrixTransposedPiS_, .Lfunc_end0-_Z19vecMatrixTransposedPiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z19vecMatrixTransposedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z19vecMatrixTransposedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #define T 8 // As Threads #define N 16 __global__ void vecMatrixTransposed(int *A, int *B) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y+ threadIdx.y; int width = gridDim.x * T; for( int j = 0; j<T; j+=N ) { B[x*width + (j+y)] = A[(y+j)*width + x]; } } int main (int argc, char *argv[]) { int i,j; int size[N*N]; int A[N][N]; int sizearr = N*N *sizeof(int); int *Adefault,*B; for (i=0; i< N; i++) { for(j = 0 ; j<N ; j++ ) { A[i][j] = ((i*i) +1) * (j+1); printf("%5d ", A[i][j]); } } printf("\n"); hipMalloc( (void**)&Adefault,sizearr); hipMalloc( (void**)&B,sizearr); hipMemcpy( Adefault, A, sizearr, hipMemcpyHostToDevice); dim3 dimBlock(T,T); dim3 dimGrid((N+ dimBlock.x - 1)/ dimBlock.x ,(N + dimBlock.y - 1) / dimBlock.y); vecMatrixTransposed<<<dimGrid,dimBlock>>>(Adefault,B); hipMemcpy(size, B, sizearr, hipMemcpyDeviceToHost); hipFree(Adefault); hipFree(B); printf("Result\n"); int newline = 0; for (i=0; i < N * N; i++) { newline++; printf("%3d ",size[i]); if(newline == N) { newline = 0; printf("\n"); } } printf("\n"); return 0; }
.text .file "cuda5.hip" .globl _Z34__device_stub__vecMatrixTransposedPiS_ # -- Begin function _Z34__device_stub__vecMatrixTransposedPiS_ .p2align 4, 0x90 .type _Z34__device_stub__vecMatrixTransposedPiS_,@function _Z34__device_stub__vecMatrixTransposedPiS_: # @_Z34__device_stub__vecMatrixTransposedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z19vecMatrixTransposedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z34__device_stub__vecMatrixTransposedPiS_, .Lfunc_end0-_Z34__device_stub__vecMatrixTransposedPiS_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $2136, %rsp # imm = 0x858 .cfi_def_cfa_offset 2192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 1104(%rsp), %r14 movl $1, %ebp movl $1, %r15d xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 movl %ebp, %ebx xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 movl %ebx, (%r14,%r13,4) incq %r13 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf addl %ebp, %ebx cmpq $16, %r13 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 
Depth=1 incq %r12 addq $64, %r14 addl %r15d, %ebp addl $2, %r15d cmpq $16, %r12 jne .LBB1_1 # %bb.4: movl $10, %edi callq putchar@PLT leaq 8(%rsp), %rdi movl $1024, %esi # imm = 0x400 callq hipMalloc movq %rsp, %rdi movl $1024, %esi # imm = 0x400 callq hipMalloc movq 8(%rsp), %rdi leaq 1104(%rsp), %rsi movl $1024, %edx # imm = 0x400 movl $1, %ecx callq hipMemcpy movabsq $8589934594, %rdi # imm = 0x200000002 movabsq $34359738376, %rdx # imm = 0x800000008 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z19vecMatrixTransposedPiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq (%rsp), %rsi leaq 80(%rsp), %rdi movl $1024, %edx # imm = 0x400 movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT xorl %ebx, %ebx xorl %ebp, %ebp jmp .LBB1_7 .p2align 4, 0x90 .LBB1_9: # in Loop: Header=BB1_7 Depth=1 incq %rbx cmpq $256, %rbx # imm = 0x100 je .LBB1_10 .LBB1_7: # =>This Inner Loop Header: Depth=1 incl %ebp movl 80(%rsp,%rbx,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf cmpl $16, %ebp jne .LBB1_9 # %bb.8: # in Loop: Header=BB1_7 Depth=1 movl $10, %edi callq putchar@PLT xorl %ebp, %ebp jmp .LBB1_9 .LBB1_10: movl $10, %edi callq putchar@PLT xorl %eax, %eax addq $2136, %rsp # imm = 0x858 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 
.cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z19vecMatrixTransposedPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z19vecMatrixTransposedPiS_,@object # @_Z19vecMatrixTransposedPiS_ .section .rodata,"a",@progbits .globl _Z19vecMatrixTransposedPiS_ .p2align 3, 0x0 _Z19vecMatrixTransposedPiS_: .quad _Z34__device_stub__vecMatrixTransposedPiS_ .size _Z19vecMatrixTransposedPiS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%5d " .size .L.str, 5 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "%3d " .size .L.str.3, 5 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z19vecMatrixTransposedPiS_" .size .L__unnamed_1, 28 
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Result" .size .Lstr, 7 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z34__device_stub__vecMatrixTransposedPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z19vecMatrixTransposedPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z19vecMatrixTransposedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*0040*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc60000000a00 */ /*0070*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002600 */ /*0080*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fe400078e0203 */ /*00a0*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x002fc800078e0202 */ /*00b0*/ IMAD R2, R5, UR4, R0 ; /* 0x0000000405027c24 */ /* 0x000fc8000f8e0200 */ /*00c0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0204 */ /*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD R5, R0, UR4, R5 ; /* 0x0000000400057c24 */ /* 0x000fc8000f8e0205 */ /*00f0*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fca00078e0204 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101906 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z19vecMatrixTransposedPiS_ .globl _Z19vecMatrixTransposedPiS_ .p2align 8 .type _Z19vecMatrixTransposedPiS_,@function _Z19vecMatrixTransposedPiS_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s4, s[0:1], 0x10 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_lshl_b32 s4, s4, 3 s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, v3, s4, v[2:3] v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v4, v[0:1], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[0:1], null, v2, s4, v[3:4] v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_store_b32 v[0:1], v4, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z19vecMatrixTransposedPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 
.amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z19vecMatrixTransposedPiS_, .Lfunc_end0-_Z19vecMatrixTransposedPiS_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z19vecMatrixTransposedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z19vecMatrixTransposedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00094a8b_00000000-6_cuda5.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ .type _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_, @function _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z19vecMatrixTransposedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_, .-_Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ .globl _Z19vecMatrixTransposedPiS_ .type _Z19vecMatrixTransposedPiS_, @function _Z19vecMatrixTransposedPiS_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: 
.size _Z19vecMatrixTransposedPiS_, .-_Z19vecMatrixTransposedPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%5d " .LC1: .string "\n" .LC2: .string "Result\n" .LC3: .string "%3d " .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $2120, %rsp .cfi_def_cfa_offset 2176 movq %fs:40, %rax movq %rax, 2104(%rsp) xorl %eax, %eax leaq 1136(%rsp), %r13 movl $0, %r15d leaq .LC0(%rip), %r14 .L12: movl %r15d, %r12d imull %r15d, %r12d addl $1, %r12d leaq -64(%r13), %rbp movl %r12d, %ebx .L13: movl %ebx, 0(%rbp) movl %ebx, %edx movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl %r12d, %ebx addq $4, %rbp cmpq %r13, %rbp jne .L13 addl $1, %r15d addq $64, %r13 cmpl $16, %r15d jne .L12 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $1024, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $1024, %esi call cudaMalloc@PLT leaq 1072(%rsp), %rsi movl $1, %ecx movl $1024, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, 32(%rsp) movl $2, 36(%rsp) movl $2, 40(%rsp) movl $1, 44(%rsp) movl $8, 24(%rsp) movl $8, 28(%rsp) movl $0, %r9d movl $0, %r8d movq 24(%rsp), %rdx movl $1, %ecx movq 36(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L22 .L15: leaq 48(%rsp), %rbx movl $2, %ecx movl $1024, %edx movq 16(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 1072(%rsp), %r13 movl $0, %ebp leaq .LC3(%rip), %r12 leaq .LC1(%rip), %r14 jmp .L17 .L22: movq 16(%rsp), %rsi movq 8(%rsp), 
%rdi call _Z41__device_stub__Z19vecMatrixTransposedPiS_PiS_ jmp .L15 .L16: addq $4, %rbx cmpq %r13, %rbx je .L23 .L17: addl $1, %ebp movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $16, %ebp jne .L16 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %ebp jmp .L16 .L23: leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 2104(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $2120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z19vecMatrixTransposedPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z19vecMatrixTransposedPiS_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad 
fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda5.hip" .globl _Z34__device_stub__vecMatrixTransposedPiS_ # -- Begin function _Z34__device_stub__vecMatrixTransposedPiS_ .p2align 4, 0x90 .type _Z34__device_stub__vecMatrixTransposedPiS_,@function _Z34__device_stub__vecMatrixTransposedPiS_: # @_Z34__device_stub__vecMatrixTransposedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z19vecMatrixTransposedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z34__device_stub__vecMatrixTransposedPiS_, .Lfunc_end0-_Z34__device_stub__vecMatrixTransposedPiS_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $2136, %rsp # imm = 0x858 .cfi_def_cfa_offset 2192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 leaq 1104(%rsp), %r14 movl $1, %ebp movl $1, %r15d xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 movl %ebp, %ebx xorl %r13d, %r13d .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 movl %ebx, (%r14,%r13,4) incq %r13 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf addl %ebp, %ebx cmpq $16, %r13 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 
Depth=1 incq %r12 addq $64, %r14 addl %r15d, %ebp addl $2, %r15d cmpq $16, %r12 jne .LBB1_1 # %bb.4: movl $10, %edi callq putchar@PLT leaq 8(%rsp), %rdi movl $1024, %esi # imm = 0x400 callq hipMalloc movq %rsp, %rdi movl $1024, %esi # imm = 0x400 callq hipMalloc movq 8(%rsp), %rdi leaq 1104(%rsp), %rsi movl $1024, %edx # imm = 0x400 movl $1, %ecx callq hipMemcpy movabsq $8589934594, %rdi # imm = 0x200000002 movabsq $34359738376, %rdx # imm = 0x800000008 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq 8(%rsp), %rax movq (%rsp), %rcx movq %rax, 72(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z19vecMatrixTransposedPiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq (%rsp), %rsi leaq 80(%rsp), %rdi movl $1024, %edx # imm = 0x400 movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT xorl %ebx, %ebx xorl %ebp, %ebp jmp .LBB1_7 .p2align 4, 0x90 .LBB1_9: # in Loop: Header=BB1_7 Depth=1 incq %rbx cmpq $256, %rbx # imm = 0x100 je .LBB1_10 .LBB1_7: # =>This Inner Loop Header: Depth=1 incl %ebp movl 80(%rsp,%rbx,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf cmpl $16, %ebp jne .LBB1_9 # %bb.8: # in Loop: Header=BB1_7 Depth=1 movl $10, %edi callq putchar@PLT xorl %ebp, %ebp jmp .LBB1_9 .LBB1_10: movl $10, %edi callq putchar@PLT xorl %eax, %eax addq $2136, %rsp # imm = 0x858 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 
.cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z19vecMatrixTransposedPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z19vecMatrixTransposedPiS_,@object # @_Z19vecMatrixTransposedPiS_ .section .rodata,"a",@progbits .globl _Z19vecMatrixTransposedPiS_ .p2align 3, 0x0 _Z19vecMatrixTransposedPiS_: .quad _Z34__device_stub__vecMatrixTransposedPiS_ .size _Z19vecMatrixTransposedPiS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%5d " .size .L.str, 5 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "%3d " .size .L.str.3, 5 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z19vecMatrixTransposedPiS_" .size .L__unnamed_1, 28 
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Result" .size .Lstr, 7 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z34__device_stub__vecMatrixTransposedPiS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z19vecMatrixTransposedPiS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <math.h> #include <iomanip> #include <fstream> #include <vector> #include <string> #include <sstream> using namespace std; //nvcc -o test_dominantcolorcu test_dominantcolor.cu -std=c++11 struct RGB{ unsigned int R,G,B; /*RGB(unsigned int _R, unsigned int _G, unsigned int _B){ R = _R; G = _G; B = _B; }*/ }; __global__ void comp(RGB* image_src, pair<int,int>* points, RGB color, int rows, int cols , int nropoints ){ //printf("%s", "//"); int index = threadIdx.x + blockIdx.x*blockDim.x; int x = ( (index/rows) < int(index/rows) ) ? int(index/rows) - 1 : (index/rows); int y = index % cols; //int c = 0; for( int i = 0 ; i < nropoints ; ++i){ if( points[i].first == x && points[i].second == y){ image_src[index] = color; } // c++; } //printf("%i %c", c,'\n'); } __global__ void add(int *a, int *b, int *c, int n){ int index = threadIdx.x + blockIdx.x*blockDim.x; c[index] = a[index] + b[index]; } __global__ void print(int *a){ if( a[blockIdx.x] != 0) printf("%d \n", blockIdx.x); } #define N (1000) #define M (10000) int space = 4; void printRGB(vector<vector<RGB>>& image_src){ for( int i = 0 ; i < image_src.size() ; ++i ){ for ( int j = 0 ; j < image_src[0].size(); ++j){ cout << (int)image_src[i][j].B << "\t" << (int)image_src[i][j].G << "\t" << (int)image_src[i][j].R << endl; } } } int main(void){ //Files to read ifstream points ("points.data"); ifstream image ("image.data"); if( !points.is_open() || !image.is_open()){ cout << "Problems opening file \n"; } int ncolors = 8; // # basic colors vector<vector<RGB>> image_src; //Matrix of image vector<vector<pair<int,int>>> v_points(ncolors); //Matrix of points according to ncolors //To read from file string line1, line2, sR, sG, sB; int G=0, B=0; //Reading dimension of image getline( image, line1 ); line2 = line1; line1 = line1.substr(0, space); line2 = line2.substr(space); if ( ! (istringstream(line1) >> B) ) B = 0; if ( ! 
(istringstream(line2) >> G) ) G = 0; //Resizing image_src image_src.resize( B ); for( int i = 0 ; i < B ; ++i) image_src[i].resize(G); time_t timer = time(0); //Reading image while( getline( image, line1 ) ){ line2 = line1; for( int i = 0 ; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ sB = line1.substr(0,space); sG = line1.substr(space,space); sR = line1.substr(space*2,space); if ( ! (istringstream(sB) >> image_src[i][j].B) ) image_src[i][j].B = 0; if ( ! (istringstream(sG) >> image_src[i][j].G) ) image_src[i][j].G = 0; if ( ! (istringstream(sR) >> image_src[i][j].R) ) image_src[i][j].R = 0; line1 = line1.substr( space*3 ); } } } image.close(); time_t timer2 = time(0); cout << "Reading image ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); int i = -1; // while( !points.eof() ){ getline(points,line1); cout << line1 << endl; } //Reading points while( !points.eof() ){ if( line1 == "/////" ){ i++; //cout << i << endl; } getline( points, line1 ); while( line1.size() != 0 && line1 != "/////" ){ sB = line1.substr( 0, space ); sG = line1.substr( space, space ); //cout << sB << "\t" << sG << endl; if ( ! (istringstream(sB) >> B )) B = 0; if ( ! 
(istringstream(sG) >> G )) G = 0; v_points[i].push_back( pair<int,int>( B, G)); line1 = line1.substr( space*2 ); //cout << line1 << endl; } } timer2 = time(0); cout << "Reading points ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; //Counting points int sum = 0; for( int i = 0 ; i < ncolors ; ++i) sum += v_points[i].size(); cout << sum << endl; timer = time(0); pair<int,int> *ppoints; // vector of points RGB *pimage_src; // vector with image info pair<int,int> *d_points; // device copy of ppoints RGB *d_image_src; // device copy of pimage_src int sizeimag = image_src.size()*image_src[0].size(); // total size of Image // Allocate space for device copies of a,b,c cudaMalloc((void **)&d_points, sum); cudaMalloc((void **)&d_image_src, sizeimag); // Alloc space for host copies of a,b,c and setup input ppoints = (pair<int,int> *)malloc(sum*sizeof(int)*2); pimage_src = (RGB *)malloc(sizeimag); //Filling points to ppoints //cout << "Puntos" << endl; int sum2 = 0; for(int i = 0; i < ncolors; ++i){ for( int j = 0 ; j < v_points[i].size() ; ++j){ ppoints[i+j] = pair<int,int>( v_points[i][j].first, v_points[i][j].second ); sum2++; } } cout << sum2 << endl; //Filling RGB to pimage_src cout << "Image" << endl; for(int i = 0; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ RGB rgb; rgb.B = image_src[i][j].B; rgb.G = image_src[i][j].G; rgb.R = image_src[i][j].R; pimage_src[i+j] = rgb; } } vector<RGB> v_color; //8 Basic Colors RGB rgb; rgb.B = 0; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); // Copy 
inputs to device cudaMemcpy(d_points, ppoints, sum*sizeof(int)*2, cudaMemcpyHostToDevice); // Args: Dir. destino, Dir. origen, tamano de dato, sentido del envio cudaMemcpy(d_image_src, pimage_src, sizeimag, cudaMemcpyHostToDevice); for( int i = 0 ; i < v_color.size(); ++i){ // Launch add() kernel on GPU comp<<<(N+M-1)/M,M>>> ( d_image_src, d_points, v_color[i], image_src.size(), image_src[0].size(), sum ); } // Copy result back to host cudaMemcpy( pimage_src, d_image_src, sizeimag, cudaMemcpyDeviceToHost); timer2 = time(0); cout << "Cuda process ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); ofstream image_converted("image_conv.data"); image_converted << setw(space) << image_src.size() << setw(space) << image_src[0].size() << endl; //Writing new Mat image for( int i = 0 ; i < image_src.size() ; ++i ){ for( int j = 0 ; j < image_src[0].size() ; ++j ){ image_src[i][j] = pimage_src[i+j]; image_converted << setw(space) << image_src[i][j].B << setw(space) << image_src[i][j].G << setw(space) << image_src[i][j].R ; } } image_converted.close(); cout << "Writing image ::: " ; cout << "Tiempo total: " << difftime(timer2, timer) << endl; // Cleanup free(ppoints); free(pimage_src); cudaFree(d_points); cudaFree(d_image_src); return 0; }
code for sm_80 Function : _Z5printPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */ /* 0x000fe40007ffe0ff */ /*0050*/ IMAD.WIDE.U32 R2, R8, R3, c[0x0][0x160] ; /* 0x0000580008027625 */ /* 0x001fcc00078e0003 */ /*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0070*/ IADD3 R6, P1, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe40007f3e0ff */ /*0080*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x004fda0003f05270 */ /*0090*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*00b0*/ STL [R1], R8 ; /* 0x0000000801007387 */ /* 0x0001e20000100800 */ /*00c0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P1 ; /* 0x00000900ff077624 */ /* 0x000fe400008e06ff */ /*00d0*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0000620000000a00 */ /*00e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe400078e00ff */ /*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fca00078e00ff */ /*0100*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x001fe40000000000 */ /*0110*/ MOV R11, 0x180 ; /* 0x00000180000b7802 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R20, 0x100 ; /* 0x0000010000147802 */ /* 0x000fe40000000f00 */ /*0130*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fc40000000f00 */ /*0140*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0150*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 
0x000fc8000791e108 */ /*0160*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0170*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x002fea0003c00000 */ /*0180*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0190*/ BRA 0x190; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z3addPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */ /* 0x001fca00078e0206 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
.......... Function : _Z4compP3RGBPSt4pairIiiES_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IABS R0, c[0x0][0x180] ; /* 0x0000600000007a13 */ /* 0x000fe20000000000 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0030*/ IABS R2, c[0x0][0x17c] ; /* 0x00005f0000027a13 */ /* 0x000fe40000000000 */ /*0040*/ I2F.RP R9, R0 ; /* 0x0000000000097306 */ /* 0x000e620000209400 */ /*0050*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */ /* 0x000e220000002100 */ /*0060*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */ /* 0x000fcc0003f25270 */ /*0070*/ I2F.RP R8, R2 ; /* 0x0000000200087306 */ /* 0x000eb00000209400 */ /*0080*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */ /* 0x002e700000001000 */ /*0090*/ MUFU.RCP R8, R8 ; /* 0x0000000800087308 */ /* 0x004ea20000001000 */ /*00a0*/ IMAD R3, R3, c[0x0][0x0], R10 ; /* 0x0000000003037a24 */ /* 0x001fe200078e020a */ /*00b0*/ IADD3 R6, R9, 0xffffffe, RZ ; /* 0x0ffffffe09067810 */ /* 0x002fcc0007ffe0ff */ /*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */ /* 0x000062000021f000 */ /*00d0*/ IADD3 R4, R8, 0xffffffe, RZ ; /* 0x0ffffffe08047810 */ /* 0x004fe40007ffe0ff */ /*00e0*/ IABS R8, R3 ; /* 0x0000000300087213 */ /* 0x000fca0000000000 */ /*00f0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x0004e2000021f000 */ /*0100*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */ /* 0x001fe400078e00ff */ /*0110*/ IMAD.MOV R13, RZ, RZ, -R7 ; /* 0x000000ffff0d7224 */ /* 0x002fe400078e0a07 */ /*0120*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x004fe400078e00ff */ /*0130*/ IMAD R13, R13, R0, RZ ; /* 0x000000000d0d7224 */ /* 0x000fe400078e02ff */ /*0140*/ IMAD.MOV R11, RZ, RZ, -R5 ; /* 0x000000ffff0b7224 */ /* 0x008fe400078e0a05 */ /*0150*/ IMAD.HI.U32 R6, R7, R13, R6 ; /* 
0x0000000d07067227 */ /* 0x000fc800078e0006 */ /*0160*/ IMAD R9, R11, R2, RZ ; /* 0x000000020b097224 */ /* 0x000fe400078e02ff */ /*0170*/ IMAD.MOV.U32 R11, RZ, RZ, R8 ; /* 0x000000ffff0b7224 */ /* 0x000fe200078e0008 */ /*0180*/ LOP3.LUT R8, R3, c[0x0][0x17c], RZ, 0x3c, !PT ; /* 0x00005f0003087a12 */ /* 0x000fe200078e3cff */ /*0190*/ IMAD.HI.U32 R4, R5, R9, R4 ; /* 0x0000000905047227 */ /* 0x000fc600078e0004 */ /*01a0*/ ISETP.GE.AND P4, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f86270 */ /*01b0*/ IMAD.HI.U32 R6, R6, R11, RZ ; /* 0x0000000b06067227 */ /* 0x000fc800078e00ff */ /*01c0*/ IMAD.HI.U32 R4, R4, R11, RZ ; /* 0x0000000b04047227 */ /* 0x000fe200078e00ff */ /*01d0*/ IADD3 R6, -R6, RZ, RZ ; /* 0x000000ff06067210 */ /* 0x000fc60007ffe1ff */ /*01e0*/ IMAD.MOV R5, RZ, RZ, -R4 ; /* 0x000000ffff057224 */ /* 0x000fe400078e0a04 */ /*01f0*/ IMAD R6, R0, R6, R11.reuse ; /* 0x0000000600067224 */ /* 0x100fe400078e020b */ /*0200*/ IMAD R5, R2, R5, R11 ; /* 0x0000000502057224 */ /* 0x000fe400078e020b */ /*0210*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff077624 */ /* 0x000fe200078e00ff */ /*0220*/ ISETP.GT.U32.AND P0, PT, R0, R6, PT ; /* 0x000000060000720c */ /* 0x000fe40003f04070 */ /*0230*/ ISETP.GT.U32.AND P6, PT, R2, R5, PT ; /* 0x000000050200720c */ /* 0x000fe40003fc4070 */ /*0240*/ ISETP.GE.AND P5, PT, R7, 0x1, PT ; /* 0x000000010700780c */ /* 0x000fd20003fa6270 */ /*0250*/ @!P0 IMAD.IADD R6, R6, 0x1, -R0 ; /* 0x0000000106068824 */ /* 0x000fe200078e0a00 */ /*0260*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */ /* 0x000fe20003f05270 */ /*0270*/ @!P6 IMAD.IADD R5, R5, 0x1, -R2 ; /* 0x000000010505e824 */ /* 0x000fe200078e0a02 */ /*0280*/ @!P6 IADD3 R4, R4, 0x1, RZ ; /* 0x000000010404e810 */ /* 0x000fe40007ffe0ff */ /*0290*/ ISETP.GT.U32.AND P2, PT, R0, R6, PT ; /* 0x000000060000720c */ /* 0x000fe40003f44070 */ /*02a0*/ ISETP.GE.U32.AND P3, PT, R5, R2, PT ; /* 0x000000020500720c */ /* 0x000fe40003f66070 */ /*02b0*/ 
ISETP.GE.AND P6, PT, R3, RZ, PT ; /* 0x000000ff0300720c */ /* 0x000fe20003fc6270 */ /*02c0*/ @!P5 EXIT ; /* 0x000000000000d94d */ /* 0x000fd80003800000 */ /*02d0*/ IADD3 R2, R7.reuse, -0x1, RZ ; /* 0xffffffff07027810 */ /* 0x040fe20007ffe0ff */ /*02e0*/ @!P2 IMAD.IADD R6, R6, 0x1, -R0 ; /* 0x000000010606a824 */ /* 0x000fe200078e0a00 */ /*02f0*/ @P3 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104043810 */ /* 0x000fe20007ffe0ff */ /*0300*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*0310*/ ISETP.GE.U32.AND P2, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe20003f46070 */ /*0320*/ IMAD.MOV.U32 R2, RZ, RZ, 0xc ; /* 0x0000000cff027424 */ /* 0x000fe200078e00ff */ /*0330*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe20000000a00 */ /*0340*/ IMAD.MOV.U32 R0, RZ, RZ, R4 ; /* 0x000000ffff007224 */ /* 0x000fe200078e0004 */ /*0350*/ LOP3.LUT R7, R7, 0x3, RZ, 0xc0, !PT ; /* 0x0000000307077812 */ /* 0x000fe200078ec0ff */ /*0360*/ @!P6 IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff06e224 */ /* 0x000fe200078e0a06 */ /*0370*/ @!P0 LOP3.LUT R6, RZ, c[0x0][0x180], RZ, 0x33, !PT ; /* 0x00006000ff068a12 */ /* 0x000fe200078e33ff */ /*0380*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */ /* 0x000fe200078e0202 */ /*0390*/ @!P4 IADD3 R0, -R0, RZ, RZ ; /* 0x000000ff0000c210 */ /* 0x000fc40007ffe1ff */ /*03a0*/ @!P1 LOP3.LUT R0, RZ, c[0x0][0x17c], RZ, 0x33, !PT ; /* 0x00005f00ff009a12 */ /* 0x000fc600078e33ff */ /*03b0*/ @!P2 BRA 0x7a0 ; /* 0x000003e00000a947 */ /* 0x000fea0003800000 */ /*03c0*/ IADD3 R8, -R7, c[0x0][0x184], RZ ; /* 0x0000610007087a10 */ /* 0x000fe20007ffe1ff */ /*03d0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */ /* 0x000fe200078e00ff */ /*03e0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fe20008000000 */ /*03f0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fca00078e00ff */ /*0400*/ LDG.E R9, [R4.64] ; /* 0x0000000804097981 */ /* 0x001ea2000c1e1900 */ /*0410*/ BSSY 
B0, 0x4d0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0420*/ ISETP.NE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */ /* 0x004fda0003f05270 */ /*0430*/ @P0 BRA 0x4c0 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*0440*/ LDG.E R9, [R4.64+0x4] ; /* 0x0000040804097981 */ /* 0x000ea4000c1e1900 */ /*0450*/ ISETP.NE.AND P0, PT, R9, R6, PT ; /* 0x000000060900720c */ /* 0x004fda0003f05270 */ /*0460*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff098624 */ /* 0x000fe400078e00ff */ /*0470*/ @!P0 IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0b8624 */ /* 0x000fe400078e00ff */ /*0480*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0d8624 */ /* 0x000fe200078e00ff */ /*0490*/ @!P0 STG.E [R2.64], R9 ; /* 0x0000000902008986 */ /* 0x0001e8000c101908 */ /*04a0*/ @!P0 STG.E [R2.64+0x4], R11 ; /* 0x0000040b02008986 */ /* 0x0001e8000c101908 */ /*04b0*/ @!P0 STG.E [R2.64+0x8], R13 ; /* 0x0000080d02008986 */ /* 0x0001e4000c101908 */ /*04c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*04d0*/ LDG.E R9, [R4.64+0x8] ; /* 0x0000080804097981 */ /* 0x001ea2000c1e1900 */ /*04e0*/ BSSY B0, 0x5a0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*04f0*/ ISETP.NE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */ /* 0x004fda0003f05270 */ /*0500*/ @P0 BRA 0x590 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*0510*/ LDG.E R9, [R4.64+0xc] ; /* 0x00000c0804097981 */ /* 0x000ea4000c1e1900 */ /*0520*/ ISETP.NE.AND P0, PT, R9, R6, PT ; /* 0x000000060900720c */ /* 0x004fda0003f05270 */ /*0530*/ @!P0 MOV R9, c[0x0][0x170] ; /* 0x00005c0000098a02 */ /* 0x000fe20000000f00 */ /*0540*/ @!P0 IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0b8624 */ /* 0x000fe400078e00ff */ /*0550*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0d8624 */ /* 0x000fe400078e00ff */ /*0560*/ @!P0 STG.E [R2.64], R9 ; /* 0x0000000902008986 */ /* 0x0001e8000c101908 */ /*0570*/ @!P0 STG.E [R2.64+0x4], R11 ; /* 
0x0000040b02008986 */ /* 0x0001e8000c101908 */ /*0580*/ @!P0 STG.E [R2.64+0x8], R13 ; /* 0x0000080d02008986 */ /* 0x0001e4000c101908 */ /*0590*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*05a0*/ LDG.E R9, [R4.64+0x10] ; /* 0x0000100804097981 */ /* 0x001ea2000c1e1900 */ /*05b0*/ BSSY B0, 0x670 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*05c0*/ ISETP.NE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */ /* 0x004fda0003f05270 */ /*05d0*/ @P0 BRA 0x660 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*05e0*/ LDG.E R9, [R4.64+0x14] ; /* 0x0000140804097981 */ /* 0x000ea4000c1e1900 */ /*05f0*/ ISETP.NE.AND P0, PT, R9, R6, PT ; /* 0x000000060900720c */ /* 0x004fda0003f05270 */ /*0600*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff098624 */ /* 0x000fe400078e00ff */ /*0610*/ @!P0 IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0b8624 */ /* 0x000fe400078e00ff */ /*0620*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0d8624 */ /* 0x000fe200078e00ff */ /*0630*/ @!P0 STG.E [R2.64], R9 ; /* 0x0000000902008986 */ /* 0x0001e8000c101908 */ /*0640*/ @!P0 STG.E [R2.64+0x4], R11 ; /* 0x0000040b02008986 */ /* 0x0001e8000c101908 */ /*0650*/ @!P0 STG.E [R2.64+0x8], R13 ; /* 0x0000080d02008986 */ /* 0x0001e4000c101908 */ /*0660*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0670*/ LDG.E R9, [R4.64+0x18] ; /* 0x0000180804097981 */ /* 0x001ea2000c1e1900 */ /*0680*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */ /* 0x000fe20007ffe0ff */ /*0690*/ BSSY B0, 0x760 ; /* 0x000000c000007945 */ /* 0x000fe60003800000 */ /*06a0*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fc40003f25270 */ /*06b0*/ ISETP.NE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */ /* 0x004fda0003f05270 */ /*06c0*/ @P0 BRA 0x750 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*06d0*/ LDG.E R9, [R4.64+0x1c] ; /* 0x00001c0804097981 */ /* 0x000ea4000c1e1900 */ /*06e0*/ ISETP.NE.AND P0, PT, R9, R6, PT ; /* 
0x000000060900720c */ /* 0x004fda0003f05270 */ /*06f0*/ @!P0 MOV R9, c[0x0][0x170] ; /* 0x00005c0000098a02 */ /* 0x000fe20000000f00 */ /*0700*/ @!P0 IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0b8624 */ /* 0x000fe400078e00ff */ /*0710*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0d8624 */ /* 0x000fe400078e00ff */ /*0720*/ @!P0 STG.E [R2.64], R9 ; /* 0x0000000902008986 */ /* 0x0001e8000c101908 */ /*0730*/ @!P0 STG.E [R2.64+0x4], R11 ; /* 0x0000040b02008986 */ /* 0x0001e8000c101908 */ /*0740*/ @!P0 STG.E [R2.64+0x8], R13 ; /* 0x0000080d02008986 */ /* 0x0001e4000c101908 */ /*0750*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0760*/ IADD3 R4, P0, R4, 0x20, RZ ; /* 0x0000002004047810 */ /* 0x000fe20007f1e0ff */ /*0770*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fc8000fffe03f */ /*0780*/ IMAD.X R5, RZ, RZ, R5, P0 ; /* 0x000000ffff057224 */ /* 0x000fe200000e0605 */ /*0790*/ @P1 BRA 0x400 ; /* 0xfffffc6000001947 */ /* 0x000fea000383ffff */ /*07a0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x000fda0003f05270 */ /*07b0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*07c0*/ UMOV UR5, 0x8 ; /* 0x0000000800057882 */ /* 0x000fe40000000000 */ /*07d0*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */ /* 0x000fe40000000a00 */ /*07e0*/ UIMAD.WIDE UR6, UR4, UR5, UR6 ; /* 0x00000005040672a5 */ /* 0x000fc8000f8e0206 */ /*07f0*/ UIADD3 UR5, UP0, UR6, 0x4, URZ ; /* 0x0000000406057890 */ /* 0x000fc8000ff1e03f */ /*0800*/ UIADD3.X UR6, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f067290 */ /* 0x000fe400087fe43f */ /*0810*/ IMAD.U32 R8, RZ, RZ, UR5 ; /* 0x00000005ff087e24 */ /* 0x000fc8000f8e00ff */ /*0820*/ IMAD.U32 R5, RZ, RZ, UR6 ; /* 0x00000006ff057e24 */ /* 0x000fe4000f8e00ff */ /*0830*/ MOV R4, R8 ; /* 0x0000000800047202 */ /* 0x000fca0000000f00 */ /*0840*/ LDG.E R9, [R4.64+-0x4] ; /* 0xfffffc0804097981 */ /* 0x001ea2000c1e1900 */ /*0850*/ IADD3 R7, R7, -0x1, RZ ; /* 
0xffffffff07077810 */ /* 0x000fe20007ffe0ff */ /*0860*/ BSSY B0, 0x940 ; /* 0x000000d000007945 */ /* 0x000fe20003800000 */ /*0870*/ IADD3 R8, P2, R4, 0x8, RZ ; /* 0x0000000804087810 */ /* 0x000fe40007f5e0ff */ /*0880*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x000fe40003f25270 */ /*0890*/ ISETP.NE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */ /* 0x004fda0003f05270 */ /*08a0*/ @P0 BRA 0x930 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*08b0*/ LDG.E R9, [R4.64] ; /* 0x0000000804097981 */ /* 0x000ea4000c1e1900 */ /*08c0*/ ISETP.NE.AND P0, PT, R9, R6, PT ; /* 0x000000060900720c */ /* 0x004fda0003f05270 */ /*08d0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff098624 */ /* 0x000fe400078e00ff */ /*08e0*/ @!P0 IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0b8624 */ /* 0x000fe400078e00ff */ /*08f0*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0d8624 */ /* 0x000fe200078e00ff */ /*0900*/ @!P0 STG.E [R2.64], R9 ; /* 0x0000000902008986 */ /* 0x0001e8000c101908 */ /*0910*/ @!P0 STG.E [R2.64+0x4], R11 ; /* 0x0000040b02008986 */ /* 0x0001e8000c101908 */ /*0920*/ @!P0 STG.E [R2.64+0x8], R13 ; /* 0x0000080d02008986 */ /* 0x0001e4000c101908 */ /*0930*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0940*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */ /* 0x000fe200010e0605 */ /*0950*/ @P1 BRA 0x830 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0960*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0970*/ BRA 0x970; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0980*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0990*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09e0*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*09f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <math.h> #include <iomanip> #include <fstream> #include <vector> #include <string> #include <sstream> using namespace std; //nvcc -o test_dominantcolorcu test_dominantcolor.cu -std=c++11 struct RGB{ unsigned int R,G,B; /*RGB(unsigned int _R, unsigned int _G, unsigned int _B){ R = _R; G = _G; B = _B; }*/ }; __global__ void comp(RGB* image_src, pair<int,int>* points, RGB color, int rows, int cols , int nropoints ){ //printf("%s", "//"); int index = threadIdx.x + blockIdx.x*blockDim.x; int x = ( (index/rows) < int(index/rows) ) ? int(index/rows) - 1 : (index/rows); int y = index % cols; //int c = 0; for( int i = 0 ; i < nropoints ; ++i){ if( points[i].first == x && points[i].second == y){ image_src[index] = color; } // c++; } //printf("%i %c", c,'\n'); } __global__ void add(int *a, int *b, int *c, int n){ int index = threadIdx.x + blockIdx.x*blockDim.x; c[index] = a[index] + b[index]; } __global__ void print(int *a){ if( a[blockIdx.x] != 0) printf("%d \n", blockIdx.x); } #define N (1000) #define M (10000) int space = 4; void printRGB(vector<vector<RGB>>& image_src){ for( int i = 0 ; i < image_src.size() ; ++i ){ for ( int j = 0 ; j < image_src[0].size(); ++j){ cout << (int)image_src[i][j].B << "\t" << (int)image_src[i][j].G << "\t" << (int)image_src[i][j].R << endl; } } } int main(void){ //Files to read ifstream points ("points.data"); ifstream image ("image.data"); if( !points.is_open() || !image.is_open()){ cout << "Problems opening file \n"; } int ncolors = 8; // # basic colors vector<vector<RGB>> image_src; //Matrix of image vector<vector<pair<int,int>>> v_points(ncolors); //Matrix of points according to ncolors //To read from file string line1, line2, sR, sG, sB; int G=0, B=0; //Reading dimension of image getline( image, line1 ); line2 = line1; line1 = line1.substr(0, space); line2 = line2.substr(space); if ( ! (istringstream(line1) >> B) ) B = 0; if ( ! 
(istringstream(line2) >> G) ) G = 0; //Resizing image_src image_src.resize( B ); for( int i = 0 ; i < B ; ++i) image_src[i].resize(G); time_t timer = time(0); //Reading image while( getline( image, line1 ) ){ line2 = line1; for( int i = 0 ; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ sB = line1.substr(0,space); sG = line1.substr(space,space); sR = line1.substr(space*2,space); if ( ! (istringstream(sB) >> image_src[i][j].B) ) image_src[i][j].B = 0; if ( ! (istringstream(sG) >> image_src[i][j].G) ) image_src[i][j].G = 0; if ( ! (istringstream(sR) >> image_src[i][j].R) ) image_src[i][j].R = 0; line1 = line1.substr( space*3 ); } } } image.close(); time_t timer2 = time(0); cout << "Reading image ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); int i = -1; // while( !points.eof() ){ getline(points,line1); cout << line1 << endl; } //Reading points while( !points.eof() ){ if( line1 == "/////" ){ i++; //cout << i << endl; } getline( points, line1 ); while( line1.size() != 0 && line1 != "/////" ){ sB = line1.substr( 0, space ); sG = line1.substr( space, space ); //cout << sB << "\t" << sG << endl; if ( ! (istringstream(sB) >> B )) B = 0; if ( ! 
(istringstream(sG) >> G )) G = 0; v_points[i].push_back( pair<int,int>( B, G)); line1 = line1.substr( space*2 ); //cout << line1 << endl; } } timer2 = time(0); cout << "Reading points ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; //Counting points int sum = 0; for( int i = 0 ; i < ncolors ; ++i) sum += v_points[i].size(); cout << sum << endl; timer = time(0); pair<int,int> *ppoints; // vector of points RGB *pimage_src; // vector with image info pair<int,int> *d_points; // device copy of ppoints RGB *d_image_src; // device copy of pimage_src int sizeimag = image_src.size()*image_src[0].size(); // total size of Image // Allocate space for device copies of a,b,c cudaMalloc((void **)&d_points, sum); cudaMalloc((void **)&d_image_src, sizeimag); // Alloc space for host copies of a,b,c and setup input ppoints = (pair<int,int> *)malloc(sum*sizeof(int)*2); pimage_src = (RGB *)malloc(sizeimag); //Filling points to ppoints //cout << "Puntos" << endl; int sum2 = 0; for(int i = 0; i < ncolors; ++i){ for( int j = 0 ; j < v_points[i].size() ; ++j){ ppoints[i+j] = pair<int,int>( v_points[i][j].first, v_points[i][j].second ); sum2++; } } cout << sum2 << endl; //Filling RGB to pimage_src cout << "Image" << endl; for(int i = 0; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ RGB rgb; rgb.B = image_src[i][j].B; rgb.G = image_src[i][j].G; rgb.R = image_src[i][j].R; pimage_src[i+j] = rgb; } } vector<RGB> v_color; //8 Basic Colors RGB rgb; rgb.B = 0; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); // Copy 
inputs to device cudaMemcpy(d_points, ppoints, sum*sizeof(int)*2, cudaMemcpyHostToDevice); // Args: Dir. destino, Dir. origen, tamano de dato, sentido del envio cudaMemcpy(d_image_src, pimage_src, sizeimag, cudaMemcpyHostToDevice); for( int i = 0 ; i < v_color.size(); ++i){ // Launch add() kernel on GPU comp<<<(N+M-1)/M,M>>> ( d_image_src, d_points, v_color[i], image_src.size(), image_src[0].size(), sum ); } // Copy result back to host cudaMemcpy( pimage_src, d_image_src, sizeimag, cudaMemcpyDeviceToHost); timer2 = time(0); cout << "Cuda process ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); ofstream image_converted("image_conv.data"); image_converted << setw(space) << image_src.size() << setw(space) << image_src[0].size() << endl; //Writing new Mat image for( int i = 0 ; i < image_src.size() ; ++i ){ for( int j = 0 ; j < image_src[0].size() ; ++j ){ image_src[i][j] = pimage_src[i+j]; image_converted << setw(space) << image_src[i][j].B << setw(space) << image_src[i][j].G << setw(space) << image_src[i][j].R ; } } image_converted.close(); cout << "Writing image ::: " ; cout << "Tiempo total: " << difftime(timer2, timer) << endl; // Cleanup free(ppoints); free(pimage_src); cudaFree(d_points); cudaFree(d_image_src); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <math.h> #include <iomanip> #include <fstream> #include <vector> #include <string> #include <sstream> using namespace std; //nvcc -o test_dominantcolorcu test_dominantcolor.cu -std=c++11 struct RGB{ unsigned int R,G,B; /*RGB(unsigned int _R, unsigned int _G, unsigned int _B){ R = _R; G = _G; B = _B; }*/ }; __global__ void comp(RGB* image_src, pair<int,int>* points, RGB color, int rows, int cols , int nropoints ){ //printf("%s", "//"); int index = threadIdx.x + blockIdx.x*blockDim.x; int x = ( (index/rows) < int(index/rows) ) ? int(index/rows) - 1 : (index/rows); int y = index % cols; //int c = 0; for( int i = 0 ; i < nropoints ; ++i){ if( points[i].first == x && points[i].second == y){ image_src[index] = color; } // c++; } //printf("%i %c", c,'\n'); } __global__ void add(int *a, int *b, int *c, int n){ int index = threadIdx.x + blockIdx.x*blockDim.x; c[index] = a[index] + b[index]; } __global__ void print(int *a){ if( a[blockIdx.x] != 0) printf("%d \n", blockIdx.x); } #define N (1000) #define M (10000) int space = 4; void printRGB(vector<vector<RGB>>& image_src){ for( int i = 0 ; i < image_src.size() ; ++i ){ for ( int j = 0 ; j < image_src[0].size(); ++j){ cout << (int)image_src[i][j].B << "\t" << (int)image_src[i][j].G << "\t" << (int)image_src[i][j].R << endl; } } } int main(void){ //Files to read ifstream points ("points.data"); ifstream image ("image.data"); if( !points.is_open() || !image.is_open()){ cout << "Problems opening file \n"; } int ncolors = 8; // # basic colors vector<vector<RGB>> image_src; //Matrix of image vector<vector<pair<int,int>>> v_points(ncolors); //Matrix of points according to ncolors //To read from file string line1, line2, sR, sG, sB; int G=0, B=0; //Reading dimension of image getline( image, line1 ); line2 = line1; line1 = line1.substr(0, space); line2 = line2.substr(space); if ( ! (istringstream(line1) >> B) ) B = 0; if ( ! 
(istringstream(line2) >> G) ) G = 0; //Resizing image_src image_src.resize( B ); for( int i = 0 ; i < B ; ++i) image_src[i].resize(G); time_t timer = time(0); //Reading image while( getline( image, line1 ) ){ line2 = line1; for( int i = 0 ; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ sB = line1.substr(0,space); sG = line1.substr(space,space); sR = line1.substr(space*2,space); if ( ! (istringstream(sB) >> image_src[i][j].B) ) image_src[i][j].B = 0; if ( ! (istringstream(sG) >> image_src[i][j].G) ) image_src[i][j].G = 0; if ( ! (istringstream(sR) >> image_src[i][j].R) ) image_src[i][j].R = 0; line1 = line1.substr( space*3 ); } } } image.close(); time_t timer2 = time(0); cout << "Reading image ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); int i = -1; // while( !points.eof() ){ getline(points,line1); cout << line1 << endl; } //Reading points while( !points.eof() ){ if( line1 == "/////" ){ i++; //cout << i << endl; } getline( points, line1 ); while( line1.size() != 0 && line1 != "/////" ){ sB = line1.substr( 0, space ); sG = line1.substr( space, space ); //cout << sB << "\t" << sG << endl; if ( ! (istringstream(sB) >> B )) B = 0; if ( ! 
(istringstream(sG) >> G )) G = 0; v_points[i].push_back( pair<int,int>( B, G)); line1 = line1.substr( space*2 ); //cout << line1 << endl; } } timer2 = time(0); cout << "Reading points ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; //Counting points int sum = 0; for( int i = 0 ; i < ncolors ; ++i) sum += v_points[i].size(); cout << sum << endl; timer = time(0); pair<int,int> *ppoints; // vector of points RGB *pimage_src; // vector with image info pair<int,int> *d_points; // device copy of ppoints RGB *d_image_src; // device copy of pimage_src int sizeimag = image_src.size()*image_src[0].size(); // total size of Image // Allocate space for device copies of a,b,c hipMalloc((void **)&d_points, sum); hipMalloc((void **)&d_image_src, sizeimag); // Alloc space for host copies of a,b,c and setup input ppoints = (pair<int,int> *)malloc(sum*sizeof(int)*2); pimage_src = (RGB *)malloc(sizeimag); //Filling points to ppoints //cout << "Puntos" << endl; int sum2 = 0; for(int i = 0; i < ncolors; ++i){ for( int j = 0 ; j < v_points[i].size() ; ++j){ ppoints[i+j] = pair<int,int>( v_points[i][j].first, v_points[i][j].second ); sum2++; } } cout << sum2 << endl; //Filling RGB to pimage_src cout << "Image" << endl; for(int i = 0; i < image_src.size(); ++i){ for( int j = 0 ; j < image_src[0].size(); ++j){ RGB rgb; rgb.B = image_src[i][j].B; rgb.G = image_src[i][j].G; rgb.R = image_src[i][j].R; pimage_src[i+j] = rgb; } } vector<RGB> v_color; //8 Basic Colors RGB rgb; rgb.B = 0; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 0; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 0; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 0; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); rgb.B = 255; rgb.G = 255; rgb.R = 255; v_color.push_back( rgb ); // Copy 
inputs to device hipMemcpy(d_points, ppoints, sum*sizeof(int)*2, hipMemcpyHostToDevice); // Args: Dir. destino, Dir. origen, tamano de dato, sentido del envio hipMemcpy(d_image_src, pimage_src, sizeimag, hipMemcpyHostToDevice); for( int i = 0 ; i < v_color.size(); ++i){ // Launch add() kernel on GPU comp<<<(N+M-1)/M,M>>> ( d_image_src, d_points, v_color[i], image_src.size(), image_src[0].size(), sum ); } // Copy result back to host hipMemcpy( pimage_src, d_image_src, sizeimag, hipMemcpyDeviceToHost); timer2 = time(0); cout << "Cuda process ::: " ; cout <<"Tiempo total: " << difftime(timer2, timer) << endl; timer = time(0); ofstream image_converted("image_conv.data"); image_converted << setw(space) << image_src.size() << setw(space) << image_src[0].size() << endl; //Writing new Mat image for( int i = 0 ; i < image_src.size() ; ++i ){ for( int j = 0 ; j < image_src[0].size() ; ++j ){ image_src[i][j] = pimage_src[i+j]; image_converted << setw(space) << image_src[i][j].B << setw(space) << image_src[i][j].G << setw(space) << image_src[i][j].R ; } } image_converted.close(); cout << "Writing image ::: " ; cout << "Tiempo total: " << difftime(timer2, timer) << endl; // Cleanup free(ppoints); free(pimage_src); hipFree(d_points); hipFree(d_image_src); return 0; }
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(cudaError err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); cudaError err; err = cudaMalloc( (void**)&ad, csize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMalloc( (void**)&bd, isize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( ad ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
code for sm_80 Function : _Z5helloPcPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e240000002100 */ /*0020*/ ISETP.GT.AND P0, PT, R6, 0xf, PT ; /* 0x0000000f0600780c */ /* 0x001fda0003f04270 */ /*0030*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0040*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0050*/ IADD3 R4, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006047a10 */ /* 0x000fe20007f1e0ff */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*0070*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff057624 */ /* 0x000fc800000e06ff */ /*0080*/ IMAD.WIDE.U32 R2, R6.reuse, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x040fe400078e0003 */ /*0090*/ LDG.E.U8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1100 */ /*00a0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1100 */ /*00b0*/ LEA.HI.X.SX32 R7, R6, c[0x0][0x164], 0x1, P0 ; /* 0x0000590006077a11 */ /* 0x000fe200000f0eff */ /*00c0*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0004 */ /*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E.U8 [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101104 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(cudaError err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); cudaError err; err = cudaMalloc( (void**)&ad, csize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMalloc( (void**)&bd, isize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( ad ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
.file "tmpxft_000a2264_00000000-6_hello-world.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2074: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2074: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error %d at line %d\n" .text .globl _Z15exitWithFailure9cudaErrori .type _Z15exitWithFailure9cudaErrori, @function _Z15exitWithFailure9cudaErrori: .LFB2070: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $8, %rsp .cfi_def_cfa_offset 16 movl %edi, %edx movl %esi, %ecx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2070: .size _Z15exitWithFailure9cudaErrori, .-_Z15exitWithFailure9cudaErrori .globl _Z26__device_stub__Z5helloPcPiPcPi .type _Z26__device_stub__Z5helloPcPiPcPi, @function _Z26__device_stub__Z5helloPcPiPcPi: .LFB2096: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 104(%rsp), %rax subq %fs:40, %rax jne .L10 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5helloPcPi(%rip), %rdi call 
cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2096: .size _Z26__device_stub__Z5helloPcPiPcPi, .-_Z26__device_stub__Z5helloPcPiPcPi .globl _Z5helloPcPi .type _Z5helloPcPi, @function _Z5helloPcPi: .LFB2097: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5helloPcPiPcPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2097: .size _Z5helloPcPi, .-_Z5helloPcPi .section .rodata.str1.1 .LC1: .string "%s" .LC2: .string "%s\n" .text .globl main .type main, @function main: .LFB2071: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $144, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax movabsq $35662932501832, %rax movl $0, %edx movq %rax, 112(%rsp) movq %rdx, 120(%rsp) movl $15, 48(%rsp) movl $10, 52(%rsp) movl $6, 56(%rsp) movl $0, 60(%rsp) movl $-11, 64(%rsp) movl $1, 68(%rsp) movl $0, 72(%rsp) movl $0, 76(%rsp) movl $0, 80(%rsp) movl $0, 84(%rsp) movl $0, 88(%rsp) movl $0, 92(%rsp) movl $0, 96(%rsp) movl $0, 100(%rsp) movl $0, 104(%rsp) movl $0, 108(%rsp) leaq 112(%rsp), %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $16, %esi call cudaMalloc@PLT testl %eax, %eax jne .L29 leaq 16(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT testl %eax, %eax jne .L30 leaq 112(%rsp), %rsi movl $1, %ecx movl $16, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L31 leaq 48(%rsp), %rsi movl $1, %ecx movl $64, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx movl $16, 24(%rsp) movl $1, 28(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 24(%rsp), %rdx movl $1, %ecx movq 36(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L32 .L20: testl %ebx, %ebx jne .L33 leaq 112(%rsp), %rdi movl $2, %ecx movl $16, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L34 
movq 8(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L35 movq 16(%rsp), %rdi call cudaFree@PLT leaq 112(%rsp), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L37 movl $46, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L37: call __stack_chk_fail@PLT .L30: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L38 movl $48, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L38: call __stack_chk_fail@PLT .L31: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L39 movl $50, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L39: call __stack_chk_fail@PLT .L32: movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z5helloPcPiPcPi jmp .L20 .L33: movq 136(%rsp), %rax subq %fs:40, %rax jne .L40 movl $56, %esi movl %ebx, %edi call _Z15exitWithFailure9cudaErrori .L40: call __stack_chk_fail@PLT .L34: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L41 movl $58, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L41: call __stack_chk_fail@PLT .L35: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L42 movl $60, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L42: call __stack_chk_fail@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2071: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z5helloPcPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2099: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq 
_Z5helloPcPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(cudaError err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); cudaError err; err = cudaMalloc( (void**)&ad, csize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMalloc( (void**)&bd, isize ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( ad ); if (err != cudaSuccess) exitWithFailure(err, __LINE__); err = cudaFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <hip/hip_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(hipError_t err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); hipError_t err; err = hipMalloc( (void**)&ad, csize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMalloc( (void**)&bd, isize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( ad, a, csize, hipMemcpyHostToDevice ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( bd, b, isize, hipMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( ad ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <hip/hip_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(hipError_t err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); hipError_t err; err = hipMalloc( (void**)&ad, csize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMalloc( (void**)&bd, isize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( ad, a, csize, hipMemcpyHostToDevice ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( bd, b, isize, hipMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( ad ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5helloPcPi .globl _Z5helloPcPi .p2align 8 .type _Z5helloPcPi,@function _Z5helloPcPi: s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e32 16, v0 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v1, 2, v0 s_waitcnt lgkmcnt(0) global_load_u8 v2, v0, s[0:1] global_load_b32 v1, v1, s[2:3] s_waitcnt vmcnt(0) v_add_nc_u16 v1, v2, v1 global_store_b8 v0, v1, s[0:1] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5helloPcPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 16 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 4 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5helloPcPi, .Lfunc_end0-_Z5helloPcPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 16 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5helloPcPi .private_segment_fixed_size: 0 .sgpr_count: 4 .sgpr_spill_count: 0 .symbol: _Z5helloPcPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// source https://www.computer-graphics.se/hello-world-for-cuda.html // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA // with an array of offsets. Then the offsets are added in parallel // to produce the string "World!" // By Ingemar Ragnemalm 2010 // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <hip/hip_runtime.h> #include <stdio.h> #include <unistd.h> const int N = 16; const int blocksize = 16; __global__ void hello(char *a, int *b) { int idx = threadIdx.x; // + 6; if (idx < N) a[idx] = a[threadIdx.x] + b[threadIdx.x]; } void exitWithFailure(hipError_t err, int line) { printf("Error %d at line %d\n", err, line); exit(1); } int main() { char a[N] = "Hello \0\0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); hipError_t err; err = hipMalloc( (void**)&ad, csize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMalloc( (void**)&bd, isize ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( ad, a, csize, hipMemcpyHostToDevice ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( bd, b, isize, hipMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( ad ); if (err != hipSuccess) exitWithFailure(err, __LINE__); err = hipFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
.text .file "hello-world.hip" .globl _Z20__device_stub__helloPcPi # -- Begin function _Z20__device_stub__helloPcPi .p2align 4, 0x90 .type _Z20__device_stub__helloPcPi,@function _Z20__device_stub__helloPcPi: # @_Z20__device_stub__helloPcPi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5helloPcPi, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z20__device_stub__helloPcPi, .Lfunc_end0-_Z20__device_stub__helloPcPi .cfi_endproc # -- End function .globl _Z15exitWithFailure10hipError_ti # -- Begin function _Z15exitWithFailure10hipError_ti .p2align 4, 0x90 .type _Z15exitWithFailure10hipError_ti,@function _Z15exitWithFailure10hipError_ti: # @_Z15exitWithFailure10hipError_ti .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movl %esi, %edx movl %edi, %esi movl $.L.str, %edi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size _Z15exitWithFailure10hipError_ti, .Lfunc_end1-_Z15exitWithFailure10hipError_ti .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $192, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -16 movl $1819043144, 16(%rsp) # imm = 0x6C6C6548 movw $8303, 20(%rsp) # imm = 0x206F movq $0, 22(%rsp) movw $0, 30(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 128(%rsp) movaps %xmm0, 144(%rsp) movaps %xmm0, 176(%rsp) movaps %xmm0, 160(%rsp) movabsq $42949672975, %rax # imm = 0xA0000000F movq %rax, 128(%rsp) movl $6, 136(%rsp) movabsq $8589934581, %rax # 
imm = 0x1FFFFFFF5 movq %rax, 144(%rsp) leaq 16(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf leaq 8(%rsp), %rdi movl $16, %esi callq hipMalloc testl %eax, %eax jne .LBB2_1 # %bb.3: leaq 40(%rsp), %rdi movl $64, %esi callq hipMalloc testl %eax, %eax jne .LBB2_4 # %bb.5: movq 8(%rsp), %rdi leaq 16(%rsp), %rsi movl $16, %edx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_6 # %bb.7: movq 40(%rsp), %rdi leaq 128(%rsp), %rsi movl $64, %edx movl $1, %ecx callq hipMemcpy movl %eax, %ebx movabsq $4294967297, %rdi # imm = 0x100000001 leaq 15(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_9 # %bb.8: movq 8(%rsp), %rax movq 40(%rsp), %rcx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z5helloPcPi, %edi pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_9: testl %ebx, %ebx jne .LBB2_10 # %bb.11: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $16, %edx movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_12 # %bb.13: movq 8(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_14 # %bb.15: movq 40(%rsp), %rdi callq hipFree leaq 16(%rsp), %rdi callq puts@PLT xorl %eax, %eax addq $192, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB2_1: .cfi_def_cfa_offset 208 movl $.L.str, %edi movl %eax, %esi movl $46, %edx jmp .LBB2_2 .LBB2_4: movl $.L.str, %edi movl %eax, %esi movl $48, %edx jmp .LBB2_2 .LBB2_6: movl $.L.str, %edi movl %eax, %esi movl $50, %edx jmp .LBB2_2 .LBB2_10: movl $.L.str, %edi movl %ebx, %esi movl $56, %edx jmp .LBB2_2 .LBB2_12: movl $.L.str, %edi 
movl %eax, %esi movl $58, %edx jmp .LBB2_2 .LBB2_14: movl $.L.str, %edi movl %eax, %esi movl $60, %edx .LBB2_2: xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5helloPcPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z5helloPcPi,@object # @_Z5helloPcPi .section .rodata,"a",@progbits .globl _Z5helloPcPi .p2align 3, 0x0 _Z5helloPcPi: .quad _Z20__device_stub__helloPcPi .size _Z5helloPcPi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error %d at line %d\n" .size .L.str, 21 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%s" .size .L.str.1, 3 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z5helloPcPi" .size .L__unnamed_1, 13 .type 
__hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__helloPcPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5helloPcPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z5helloPcPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e240000002100 */ /*0020*/ ISETP.GT.AND P0, PT, R6, 0xf, PT ; /* 0x0000000f0600780c */ /* 0x001fda0003f04270 */ /*0030*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0040*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0050*/ IADD3 R4, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006047a10 */ /* 0x000fe20007f1e0ff */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*0070*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff057624 */ /* 0x000fc800000e06ff */ /*0080*/ IMAD.WIDE.U32 R2, R6.reuse, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x040fe400078e0003 */ /*0090*/ LDG.E.U8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1100 */ /*00a0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1100 */ /*00b0*/ LEA.HI.X.SX32 R7, R6, c[0x0][0x164], 0x1, P0 ; /* 0x0000590006077a11 */ /* 0x000fe200000f0eff */ /*00c0*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0004 */ /*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00e0*/ STG.E.U8 [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101104 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5helloPcPi .globl _Z5helloPcPi .p2align 8 .type _Z5helloPcPi,@function _Z5helloPcPi: s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e32 16, v0 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v1, 2, v0 s_waitcnt lgkmcnt(0) global_load_u8 v2, v0, s[0:1] global_load_b32 v1, v1, s[2:3] s_waitcnt vmcnt(0) v_add_nc_u16 v1, v2, v1 global_store_b8 v0, v1, s[0:1] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5helloPcPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 16 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 3 .amdhsa_next_free_sgpr 4 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5helloPcPi, .Lfunc_end0-_Z5helloPcPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 16 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5helloPcPi .private_segment_fixed_size: 0 .sgpr_count: 4 .sgpr_spill_count: 0 .symbol: _Z5helloPcPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 3 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000a2264_00000000-6_hello-world.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2074: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2074: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error %d at line %d\n" .text .globl _Z15exitWithFailure9cudaErrori .type _Z15exitWithFailure9cudaErrori, @function _Z15exitWithFailure9cudaErrori: .LFB2070: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $8, %rsp .cfi_def_cfa_offset 16 movl %edi, %edx movl %esi, %ecx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2070: .size _Z15exitWithFailure9cudaErrori, .-_Z15exitWithFailure9cudaErrori .globl _Z26__device_stub__Z5helloPcPiPcPi .type _Z26__device_stub__Z5helloPcPiPcPi, @function _Z26__device_stub__Z5helloPcPiPcPi: .LFB2096: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 104(%rsp), %rax subq %fs:40, %rax jne .L10 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5helloPcPi(%rip), %rdi call 
cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2096: .size _Z26__device_stub__Z5helloPcPiPcPi, .-_Z26__device_stub__Z5helloPcPiPcPi .globl _Z5helloPcPi .type _Z5helloPcPi, @function _Z5helloPcPi: .LFB2097: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5helloPcPiPcPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2097: .size _Z5helloPcPi, .-_Z5helloPcPi .section .rodata.str1.1 .LC1: .string "%s" .LC2: .string "%s\n" .text .globl main .type main, @function main: .LFB2071: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $144, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax movabsq $35662932501832, %rax movl $0, %edx movq %rax, 112(%rsp) movq %rdx, 120(%rsp) movl $15, 48(%rsp) movl $10, 52(%rsp) movl $6, 56(%rsp) movl $0, 60(%rsp) movl $-11, 64(%rsp) movl $1, 68(%rsp) movl $0, 72(%rsp) movl $0, 76(%rsp) movl $0, 80(%rsp) movl $0, 84(%rsp) movl $0, 88(%rsp) movl $0, 92(%rsp) movl $0, 96(%rsp) movl $0, 100(%rsp) movl $0, 104(%rsp) movl $0, 108(%rsp) leaq 112(%rsp), %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 8(%rsp), %rdi movl $16, %esi call cudaMalloc@PLT testl %eax, %eax jne .L29 leaq 16(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT testl %eax, %eax jne .L30 leaq 112(%rsp), %rsi movl $1, %ecx movl $16, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L31 leaq 48(%rsp), %rsi movl $1, %ecx movl $64, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx movl $16, 24(%rsp) movl $1, 28(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 24(%rsp), %rdx movl $1, %ecx movq 36(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L32 .L20: testl %ebx, %ebx jne .L33 leaq 112(%rsp), %rdi movl $2, %ecx movl $16, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L34 
movq 8(%rsp), %rdi call cudaFree@PLT testl %eax, %eax jne .L35 movq 16(%rsp), %rdi call cudaFree@PLT leaq 112(%rsp), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L37 movl $46, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L37: call __stack_chk_fail@PLT .L30: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L38 movl $48, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L38: call __stack_chk_fail@PLT .L31: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L39 movl $50, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L39: call __stack_chk_fail@PLT .L32: movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z26__device_stub__Z5helloPcPiPcPi jmp .L20 .L33: movq 136(%rsp), %rax subq %fs:40, %rax jne .L40 movl $56, %esi movl %ebx, %edi call _Z15exitWithFailure9cudaErrori .L40: call __stack_chk_fail@PLT .L34: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L41 movl $58, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L41: call __stack_chk_fail@PLT .L35: movq 136(%rsp), %rdx subq %fs:40, %rdx jne .L42 movl $60, %esi movl %eax, %edi call _Z15exitWithFailure9cudaErrori .L42: call __stack_chk_fail@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2071: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z5helloPcPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2099: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq 
_Z5helloPcPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hello-world.hip" .globl _Z20__device_stub__helloPcPi # -- Begin function _Z20__device_stub__helloPcPi .p2align 4, 0x90 .type _Z20__device_stub__helloPcPi,@function _Z20__device_stub__helloPcPi: # @_Z20__device_stub__helloPcPi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5helloPcPi, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z20__device_stub__helloPcPi, .Lfunc_end0-_Z20__device_stub__helloPcPi .cfi_endproc # -- End function .globl _Z15exitWithFailure10hipError_ti # -- Begin function _Z15exitWithFailure10hipError_ti .p2align 4, 0x90 .type _Z15exitWithFailure10hipError_ti,@function _Z15exitWithFailure10hipError_ti: # @_Z15exitWithFailure10hipError_ti .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 movl %esi, %edx movl %edi, %esi movl $.L.str, %edi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size _Z15exitWithFailure10hipError_ti, .Lfunc_end1-_Z15exitWithFailure10hipError_ti .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $192, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -16 movl $1819043144, 16(%rsp) # imm = 0x6C6C6548 movw $8303, 20(%rsp) # imm = 0x206F movq $0, 22(%rsp) movw $0, 30(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 128(%rsp) movaps %xmm0, 144(%rsp) movaps %xmm0, 176(%rsp) movaps %xmm0, 160(%rsp) movabsq $42949672975, %rax # imm = 0xA0000000F movq %rax, 128(%rsp) movl $6, 136(%rsp) movabsq $8589934581, %rax # 
imm = 0x1FFFFFFF5 movq %rax, 144(%rsp) leaq 16(%rsp), %rsi movl $.L.str.1, %edi xorl %eax, %eax callq printf leaq 8(%rsp), %rdi movl $16, %esi callq hipMalloc testl %eax, %eax jne .LBB2_1 # %bb.3: leaq 40(%rsp), %rdi movl $64, %esi callq hipMalloc testl %eax, %eax jne .LBB2_4 # %bb.5: movq 8(%rsp), %rdi leaq 16(%rsp), %rsi movl $16, %edx movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_6 # %bb.7: movq 40(%rsp), %rdi leaq 128(%rsp), %rsi movl $64, %edx movl $1, %ecx callq hipMemcpy movl %eax, %ebx movabsq $4294967297, %rdi # imm = 0x100000001 leaq 15(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_9 # %bb.8: movq 8(%rsp), %rax movq 40(%rsp), %rcx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z5helloPcPi, %edi pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_9: testl %ebx, %ebx jne .LBB2_10 # %bb.11: movq 8(%rsp), %rsi leaq 16(%rsp), %rdi movl $16, %edx movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB2_12 # %bb.13: movq 8(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB2_14 # %bb.15: movq 40(%rsp), %rdi callq hipFree leaq 16(%rsp), %rdi callq puts@PLT xorl %eax, %eax addq $192, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB2_1: .cfi_def_cfa_offset 208 movl $.L.str, %edi movl %eax, %esi movl $46, %edx jmp .LBB2_2 .LBB2_4: movl $.L.str, %edi movl %eax, %esi movl $48, %edx jmp .LBB2_2 .LBB2_6: movl $.L.str, %edi movl %eax, %esi movl $50, %edx jmp .LBB2_2 .LBB2_10: movl $.L.str, %edi movl %ebx, %esi movl $56, %edx jmp .LBB2_2 .LBB2_12: movl $.L.str, %edi 
movl %eax, %esi movl $58, %edx jmp .LBB2_2 .LBB2_14: movl $.L.str, %edi movl %eax, %esi movl $60, %edx .LBB2_2: xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5helloPcPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z5helloPcPi,@object # @_Z5helloPcPi .section .rodata,"a",@progbits .globl _Z5helloPcPi .p2align 3, 0x0 _Z5helloPcPi: .quad _Z20__device_stub__helloPcPi .size _Z5helloPcPi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error %d at line %d\n" .size .L.str, 21 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%s" .size .L.str.1, 3 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z5helloPcPi" .size .L__unnamed_1, 13 .type 
__hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__helloPcPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5helloPcPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * Shared memory is necessary here because we are reading and writing
 * to memory many times...
 *
 * Note that __syncthreads is not needed here because each row in shared
 * memory is exclusively read and written to by a single warp.
 *
 * NOTE(review): the indexing (threadIdx.x + 32 * threadIdx.y) assumes a
 * single 32 x 32 thread block — confirm the launch configuration.
 */
__global__ void pow_rows(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  // (double literal; converted to float at compile time.)
  s_res[row_i] = 1.0;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single row.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    s_res[row_i] *= s_a[row_i];
    pow -= 1;
  }

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
};

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * After reading the matrix a into local memory row by row, we
 * compute the power of each element on a column by column basis
 * in order to cause a bank conflict.
 *
 * Note that __syncthreads is necessary here because the same shared
 * memory is accessed by multiple warps.
 */
__global__ void pow_cols(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[row_i] = 1.0;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // assign each thread an index so that threads in the same warp process
  // elements in the same column.
  const uint col_i = threadIdx.y + 32 * threadIdx.x;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single column.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    // Note that col_i % 32 = threadIdx.y.
    // Since all threads in the same warp have the same threadIdx.y, this
    // is a 32-way bank conflict!
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must
  // finish computing powers column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
};

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * After reading the matrix a into local memory row by row, we
 * compute the power of each element on a column by column basis.
 * Due to zero padding, we don't have a bank conflict.
 *
 * Note that __syncthreads is necessary here because the same shared
 * memory is accessed by multiple warps.
 */
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  // (33 x 33: one extra padding column/row so column accesses have a
  // stride of 33, which is relatively prime to the 32 shared-memory banks.)
  __shared__ float s_a[33 * 33];

  // store result in shared memory for fast writes.
  __shared__ float s_res[33 * 33];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[row_i] = 1.0;

  // assign each thread an index so that threads in the same warp process
  // elements in the same column.
  const uint col_i = threadIdx.y + 33 * threadIdx.x;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single column.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    // Results from number theory: Additive group of integers mod n is
    // generated by all integers m relatively prime to n. A warp conflict occurs
    // if two threads in a warp access the same address mod 32. We
    // minimize bank conflicts by reading and writing data to shared memory
    // with a stride m relatively prime to n.
    //
    // Even though we are reading data column-by-column, we don't have
    // bank conflicts since our stride is relatively prime to 32.
    // For larger matrices (size n), we should choose a stride that is
    // relatively prime to 32. It is useful to note that for any integer n,
    // gcd(n, n + 1) = 1.
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must
  // finish computing powers column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
};
code for sm_80 Function : _Z12pow_cols_padPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R4, R5, 0x5 ; /* 0x0000000504007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0003 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe20003f05270 */ /*0090*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */ /* 0x000fce00000001ff */ /*00a0*/ STS [R0.X4+0x1104], R7 ; /* 0x0011040700007388 */ /* 0x0001e80000004800 */ /*00b0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e80000004800 */ /*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00d0*/ @!P0 BRA 0x4d0 ; /* 0x000003f000008947 */ /* 0x000fea0003800000 */ /*00e0*/ IMAD R2, R5, 0x21, R4 ; /* 0x0000002105027824 */ /* 0x001fe200078e0204 */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fc80000000f00 */ /*0100*/ LDS R3, [R2.X4] ; /* 0x0000000002037984 */ /* 0x0000620000004800 */ /*0110*/ IADD3 R4, R5.reuse, -0x1, RZ ; /* 0xffffffff05047810 */ /* 0x040fe40007ffe0ff */ /*0120*/ LOP3.LUT P1, R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe2000782c0ff */ /*0130*/ LDS R6, [R2.X4+0x1104] ; /* 0x0011040002067984 */ /* 0x0000a20000004800 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ 
/*0150*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fd20000000f00 */ /*0160*/ @!P1 BRA 0x1d0 ; /* 0x0000006000009947 */ /* 0x000fea0003800000 */ /*0170*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x001fe40000000f00 */ /*0180*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe20007ffe0ff */ /*0190*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*01a0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*01c0*/ @P1 BRA 0x180 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01d0*/ @!P0 BRA 0x4c0 ; /* 0x000002e000008947 */ /* 0x001fea0003800000 */ /*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f04270 */ /*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fe40003f24270 */ /*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0240*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */ /* 0x000fc60007ffe0ff */ /*0260*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fc60003f24270 */ /*0280*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02d0*/ 
FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0350*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */ /* 0x000fda0003f24270 */ /*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0390*/ FMUL R6, R3.reuse, R6 ; /* 0x0000000603067220 */ /* 0x046fe20000400000 */ /*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */ /* 0x000fe20007ffe0ff */ /*03c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0420*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe40000400000 */ /*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */ /* 0x000fda0000705670 */ /*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */ /* 0x000fe20007ffe0ff */ /*0460*/ 
FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fc60000400000 */ /*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0480*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04c0*/ STS [R2.X4+0x1104], R6 ; /* 0x0011040602007388 */ /* 0x0041e40000004800 */ /*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*04e0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04f0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x002fe200000f14ff */ /*0500*/ LDS R5, [R0.X4+0x1104] ; /* 0x0011040000057984 */ /* 0x000e280000004800 */ /*0510*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z8pow_colsPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R4, R5, 0x5 ; /* 0x0000000504007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0003 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe20003f05270 */ /*0090*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */ /* 0x000fce00000001ff */ /*00a0*/ STS [R0.X4+0x1000], R7 ; /* 0x0010000700007388 */ /* 0x0001e80000004800 */ /*00b0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e80000004800 */ /*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00d0*/ @!P0 BRA 0x4d0 ; /* 0x000003f000008947 */ /* 0x000fea0003800000 */ /*00e0*/ LEA R2, R5, R4, 0x5 ; /* 0x0000000405027211 */ /* 0x001fe400078e28ff */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fc60000000f00 */ /*0100*/ LDS R3, [R2.X4] ; /* 0x0000000002037984 */ /* 0x0000620000004800 */ /*0110*/ IADD3 R4, R5.reuse, -0x1, RZ ; /* 0xffffffff05047810 */ /* 0x040fe40007ffe0ff */ /*0120*/ LOP3.LUT P1, R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe2000782c0ff */ /*0130*/ LDS R6, [R2.X4+0x1000] ; /* 0x0010000002067984 */ /* 0x0000a20000004800 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*0150*/ MOV R4, 
c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fd20000000f00 */ /*0160*/ @!P1 BRA 0x1d0 ; /* 0x0000006000009947 */ /* 0x000fea0003800000 */ /*0170*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x001fe40000000f00 */ /*0180*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe20007ffe0ff */ /*0190*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*01a0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*01c0*/ @P1 BRA 0x180 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01d0*/ @!P0 BRA 0x4c0 ; /* 0x000002e000008947 */ /* 0x001fea0003800000 */ /*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f04270 */ /*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fe40003f24270 */ /*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0240*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */ /* 0x000fc60007ffe0ff */ /*0260*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fc60003f24270 */ /*0280*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02d0*/ FMUL R6, R3, R6 ; 
/* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0350*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */ /* 0x000fda0003f24270 */ /*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0390*/ FMUL R6, R3.reuse, R6 ; /* 0x0000000603067220 */ /* 0x046fe20000400000 */ /*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */ /* 0x000fe20007ffe0ff */ /*03c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0420*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe40000400000 */ /*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */ /* 0x000fda0000705670 */ /*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */ /* 0x000fe20007ffe0ff */ /*0460*/ FMUL R6, R3, R6 ; /* 
0x0000000603067220 */ /* 0x006fc60000400000 */ /*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0480*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04c0*/ STS [R2.X4+0x1000], R6 ; /* 0x0010000602007388 */ /* 0x0041e40000004800 */ /*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*04e0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04f0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x002fe200000f14ff */ /*0500*/ LDS R5, [R0.X4+0x1000] ; /* 0x0010000000057984 */ /* 0x000e280000004800 */ /*0510*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z8pow_rowsPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R0, R3, 0x5 ; /* 0x0000000300007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0005 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ HFMA2.MMA R5, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff057435 */ /* 0x000fe200000001ff */ /*0090*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe40003f05270 */ /*00a0*/ MOV R4, 0x3f800000 ; /* 0x3f80000000047802 */ /* 0x000fc80000000f00 */ /*00b0*/ STS [R0.X4+0x1000], R5 ; /* 0x0010000500007388 */ /* 0x0001e80000004800 */ /*00c0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e60000004800 */ /*00d0*/ @!P0 BRA 0x4c0 ; /* 0x000003e000008947 */ /* 0x000fea0003800000 */ /*00e0*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe40000000f00 */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x001fe40000000f00 */ /*0100*/ IADD3 R4, R2, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x000fc40007ffe0ff */ /*0110*/ LOP3.LUT P1, R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe4000782c0ff */ /*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*0130*/ MOV R4, 0x3f800000 ; /* 0x3f80000000047802 */ /* 0x000fd20000000f00 */ /*0140*/ @!P1 BRA 0x1c0 ; /* 0x0000007000009947 */ /* 0x000fea0003800000 */ /*0150*/ HFMA2.MMA R4, -RZ, RZ, 1.875, 0 ; /* 
0x3f800000ff047435 */ /* 0x000fe200000001ff */ /*0160*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fd00000000f00 */ /*0170*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe20007ffe0ff */ /*0180*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe20000400000 */ /*0190*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*01a0*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fda0003f25270 */ /*01b0*/ @P1 BRA 0x170 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01c0*/ @!P0 BRA 0x4b0 ; /* 0x000002e000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f04270 */ /*01e0*/ @!P0 BRA 0x440 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*01f0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fe40003f24270 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0210*/ @!P1 BRA 0x360 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0220*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0230*/ FMUL R2, R3, R4 ; /* 0x0000000403027220 */ /* 0x000fe20000400000 */ /*0240*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */ /* 0x000fc60007ffe0ff */ /*0250*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fe20000400000 */ /*0260*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fc60003f24270 */ /*0270*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0280*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02d0*/ FMUL R2, R3, R2 ; /* 
0x0000000203027220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R4, R3, R2 ; /* 0x0000000203047220 */ /* 0x000fe20000400000 */ /*0350*/ @P1 BRA 0x230 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0360*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */ /* 0x000fda0003f24270 */ /*0370*/ @!P1 BRA 0x420 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0380*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe20000400000 */ /*0390*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03a0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */ /* 0x000fe20007ffe0ff */ /*03b0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03c0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe40000400000 */ /*0420*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */ /* 0x000fda0000705670 */ /*0430*/ @!P0 BRA 0x4b0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0440*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */ /* 0x000fe20007ffe0ff */ /*0450*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc60000400000 */ /*0460*/ ISETP.NE.AND P0, PT, R5, RZ, 
PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0470*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0480*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*04a0*/ @P0 BRA 0x440 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04b0*/ STS [R0.X4+0x1000], R4 ; /* 0x0010000400007388 */ /* 0x0001e40000004800 */ /*04c0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04d0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x001fca00000f14ff */ /*04e0*/ STG.E [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x000fe2000c101904 */ /*04f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0500*/ BRA 0x500; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0510*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0520*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0530*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * Expects a single block launched as dim3(32, 32): one warp per row, one
 * thread per element.
 *
 * Shared memory is used because every element is read and written many
 * times (once per loop iteration).
 *
 * Note that __syncthreads is not needed here because each row in shared
 * memory is exclusively read and written to by a single warp.
 */
__global__ void pow_rows(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0 (the
  // multiplicative identity). Use a float literal to avoid a double constant
  // in a float kernel.
  s_res[row_i] = 1.0f;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single row.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    s_res[row_i] *= s_a[row_i];
    pow -= 1;
  }

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * After reading the matrix into shared memory row by row, the power of each
 * element is computed column by column, deliberately causing a 32-way
 * shared-memory bank conflict (all threads of a warp hit the same bank).
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[row_i] = 1.0f;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // assign each thread an index so that threads in the same warp process
  // elements in the same column.
  const uint col_i = threadIdx.y + 32 * threadIdx.x;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single column.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    // Note that col_i % 32 = threadIdx.y. Since all threads in the same warp
    // share threadIdx.y, every lane hits the same bank: a 32-way conflict!
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * The matrix is staged into shared memory row by row and processed column by
 * column. Each shared-memory row is padded by one float (stride 33), so a
 * column walk touches 32 distinct banks — no bank conflict.
 *
 * FIX(review): the previous version wrote shared memory with a dense
 * stride-32 index but read it back with the padded stride-33 index, so every
 * flat index congruent to 32 (mod 33) was never raised to the power (came
 * back as 1.0) and some padded slots were read uninitialized. All
 * shared-memory traffic now uses the padded stride consistently.
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads (padded layout).
  __shared__ float s_a[33 * 33];

  // store result in shared memory for fast writes (padded layout).
  __shared__ float s_res[33 * 33];

  // dense row-major index used for coalesced global memory accesses.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // matching shared-memory index with a 33-element row stride. Since
  // gcd(33, 32) = 1, consecutive rows start in different banks.
  const uint s_row_i = threadIdx.x + 33 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[s_row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[s_row_i] = 1.0f;

  // column-order index: element (x = threadIdx.y, y = threadIdx.x) in the
  // padded layout, so each warp owns a single column.
  const uint col_i = threadIdx.y + 33 * threadIdx.x;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // Number-theory note: a bank conflict occurs when two lanes of a warp
  // access the same address mod 32. Striding shared memory by a value
  // relatively prime to 32 (here 33; for any n, gcd(n, n + 1) = 1) spreads a
  // column access over all 32 banks, so this loop is conflict-free even
  // though it reads column-by-column.
  while (pow > 0) {
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[s_row_i];
}
.file "tmpxft_001966bc_00000000-6_example.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf .type _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf, @function _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8pow_rowsPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf, .-_Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf .globl _Z8pow_rowsPKfjPf .type _Z8pow_rowsPKfjPf, @function _Z8pow_rowsPKfjPf: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z8pow_rowsPKfjPf, .-_Z8pow_rowsPKfjPf .globl _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf .type _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf, @function _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf: .LFB2053: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8pow_colsPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf, .-_Z31__device_stub__Z8pow_colsPKfjPfPKfjPf .globl _Z8pow_colsPKfjPf .type _Z8pow_colsPKfjPf, @function _Z8pow_colsPKfjPf: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _Z8pow_colsPKfjPf, .-_Z8pow_colsPKfjPf .globl _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf .type _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf, @function _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf: .LFB2055: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax 
movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12pow_cols_padPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf, .-_Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf .globl _Z12pow_cols_padPKfjPf .type _Z12pow_cols_padPKfjPf, @function _Z12pow_cols_padPKfjPf: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size _Z12pow_cols_padPKfjPf, .-_Z12pow_cols_padPKfjPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12pow_cols_padPKfjPf" .LC1: .string "_Z8pow_colsPKfjPf" .LC2: .string "_Z8pow_rowsPKfjPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12pow_cols_padPKfjPf(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8pow_colsPKfjPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z8pow_rowsPKfjPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * Expects a single block launched as dim3(32, 32): one warp per row, one
 * thread per element.
 *
 * Shared memory is used because every element is read and written many
 * times (once per loop iteration).
 *
 * Note that __syncthreads is not needed here because each row in shared
 * memory is exclusively read and written to by a single warp.
 */
__global__ void pow_rows(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0 (the
  // multiplicative identity). Use a float literal to avoid a double constant
  // in a float kernel.
  s_res[row_i] = 1.0f;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single row.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    s_res[row_i] *= s_a[row_i];
    pow -= 1;
  }

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * After reading the matrix into shared memory row by row, the power of each
 * element is computed column by column, deliberately causing a 32-way
 * shared-memory bank conflict (all threads of a warp hit the same bank).
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[row_i] = 1.0f;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // assign each thread an index so that threads in the same warp process
  // elements in the same column.
  const uint col_i = threadIdx.y + 32 * threadIdx.x;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single column.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    // Note that col_i % 32 = threadIdx.y. Since all threads in the same warp
    // share threadIdx.y, every lane hits the same bank: a 32-way conflict!
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * The matrix is staged into shared memory row by row and processed column by
 * column. Each shared-memory row is padded by one float (stride 33), so a
 * column walk touches 32 distinct banks — no bank conflict.
 *
 * FIX(review): the previous version wrote shared memory with a dense
 * stride-32 index but read it back with the padded stride-33 index, so every
 * flat index congruent to 32 (mod 33) was never raised to the power (came
 * back as 1.0) and some padded slots were read uninitialized. All
 * shared-memory traffic now uses the padded stride consistently.
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads (padded layout).
  __shared__ float s_a[33 * 33];

  // store result in shared memory for fast writes (padded layout).
  __shared__ float s_res[33 * 33];

  // dense row-major index used for coalesced global memory accesses.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // matching shared-memory index with a 33-element row stride. Since
  // gcd(33, 32) = 1, consecutive rows start in different banks.
  const uint s_row_i = threadIdx.x + 33 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[s_row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[s_row_i] = 1.0f;

  // column-order index: element (x = threadIdx.y, y = threadIdx.x) in the
  // padded layout, so each warp owns a single column.
  const uint col_i = threadIdx.y + 33 * threadIdx.x;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // Number-theory note: a bank conflict occurs when two lanes of a warp
  // access the same address mod 32. Striding shared memory by a value
  // relatively prime to 32 (here 33; for any n, gcd(n, n + 1) = 1) spreads a
  // column access over all 32 banks, so this loop is conflict-free even
  // though it reads column-by-column.
  while (pow > 0) {
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[s_row_i];
}
#include <hip/hip_runtime.h>

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * Expects a single block launched as dim3(32, 32): one warp per row, one
 * thread per element.
 *
 * Shared memory is used because every element is read and written many
 * times (once per loop iteration).
 *
 * Note that __syncthreads is not needed here because each row in shared
 * memory is exclusively read and written to by a single warp.
 */
__global__ void pow_rows(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0 (the
  // multiplicative identity). Use a float literal to avoid a double constant
  // in a float kernel.
  s_res[row_i] = 1.0f;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single row.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    s_res[row_i] *= s_a[row_i];
    pow -= 1;
  }

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * After reading the matrix into shared memory row by row, the power of each
 * element is computed column by column, deliberately causing a bank conflict
 * (all threads of a warp hit the same bank).
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads.
  __shared__ float s_a[32 * 32];

  // store result in shared memory for fast writes.
  __shared__ float s_res[32 * 32];

  // assign each thread an index so that threads in the same warp process
  // elements in the same row.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[row_i] = 1.0f;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // assign each thread an index so that threads in the same warp process
  // elements in the same column.
  const uint col_i = threadIdx.y + 32 * threadIdx.x;

  // a single block computes the power of the entire matrix.
  // each warp in the block computes the power of a single column.
  // each thread in the warp computes the power of a single element.
  while (pow > 0) {
    // Note that col_i % 32 = threadIdx.y. Since all threads in the same warp
    // share threadIdx.y, every lane hits the same bank: a conflict!
    // (NOTE(review): on AMD hardware the wavefront/bank geometry differs
    // from NVIDIA's 32-lane/32-bank layout — confirm the conflict pattern
    // on the target architecture.)
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores it in res_ij.
 *
 * The matrix is staged into shared memory row by row and processed column by
 * column. Each shared-memory row is padded by one float (stride 33), so a
 * column walk is spread across the LDS banks — no bank conflict.
 *
 * FIX(review): the previous version wrote shared memory with a dense
 * stride-32 index but read it back with the padded stride-33 index, so every
 * flat index congruent to 32 (mod 33) was never raised to the power (came
 * back as 1.0) and some padded slots were read uninitialized. All
 * shared-memory traffic now uses the padded stride consistently.
 *
 * Note that __syncthreads is necessary here because the same shared memory
 * is accessed by multiple warps.
 */
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
  // store entire matrix in shared memory for fast reads (padded layout).
  __shared__ float s_a[33 * 33];

  // store result in shared memory for fast writes (padded layout).
  __shared__ float s_res[33 * 33];

  // dense row-major index used for coalesced global memory accesses.
  const uint row_i = threadIdx.x + 32 * threadIdx.y;

  // matching shared-memory index with a 33-element row stride. Since
  // gcd(33, 32) = 1, consecutive rows start in different banks.
  const uint s_row_i = threadIdx.x + 33 * threadIdx.y;

  // copy matrix from global memory to shared memory in a coalesced fashion.
  s_a[s_row_i] = a[row_i];

  // initialize result as a matrix where each element is 1.0.
  s_res[s_row_i] = 1.0f;

  // column-order index: element (x = threadIdx.y, y = threadIdx.x) in the
  // padded layout, so each warp owns a single column.
  const uint col_i = threadIdx.y + 33 * threadIdx.x;

  // in order to process the matrix column-by-column... all warps must
  // finish initializing shared memory row-by-row.
  __syncthreads();

  // Number-theory note: a bank conflict occurs when two lanes access the
  // same bank. Striding shared memory by a value relatively prime to the
  // bank count (here 33; for any n, gcd(n, n + 1) = 1) spreads a column
  // access across the banks, so this loop avoids conflicts even though it
  // reads column-by-column.
  while (pow > 0) {
    s_res[col_i] *= s_a[col_i];
    pow -= 1;
  }

  // in order to read the matrix row-by-row... all warps must finish
  // updating shared memory column-by-column.
  __syncthreads();

  // copy result from shared memory to global memory in a coalesced fashion.
  res[row_i] = s_res[s_row_i];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * Shared memory is necessary here because we are reading and writing * to memory many times... * * Note that __syncthreads is not needed here because each row in shared * memory is exclusively read and written to by a single warp. */ __global__ void pow_rows(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[32 * 32]; // store result in shared memory for fast writes. __shared__ float s_res[32 * 32]; // assign each thread an index so that threads in the same warp process // elements in the same row. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single row. // each thread in the warp computes the power of a single element. while (pow > 0) { s_res[row_i] *= s_a[row_i]; pow -= 1; } // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; }; /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * After reading the matrix a into local memory row by row, we * compute the power of each element on a column by column basis * in order to cause a bank conflict. * * Note that __syncthreads is necessary here because the same shared * memory is accessed by multiple warps. */ __global__ void pow_cols(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[32 * 32]; // store result in shared memory for fast writes. 
__shared__ float s_res[32 * 32]; // assign each thread an index so that threads in the same warp process // elements in the same row. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // in order to process the matrix column-by-column... all warps must // finish initializing shared memory row-by-row. __syncthreads(); // assign each thread an index so that threads in the same warp process // elements in the same column. const uint col_i = threadIdx.y + 32 * threadIdx.x; // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single column. // each thread in the warp computes the power of a single element. while (pow > 0) { // Note that col_i % 32 = threadIdx.y. // Since all threads in the same warp have the same threadIdx.y, this // is a 32-way bank conflict! s_res[col_i] *= s_a[col_i]; pow -= 1; } // in order to read the matrix row-by-row... all warps must // finish initializing shared memory column-by-column. __syncthreads(); // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; }; /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * After reading the matrix a into local memory row by row, we * compute the power of each element on a column by column basis. * Due to zero padding, we don't have a bank conflict. * * Note that __syncthreads is necessary here because the same shared * memory is accessed by multiple warps. */ __global__ void pow_cols_pad(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[33 * 33]; // store result in shared memory for fast writes. 
__shared__ float s_res[33 * 33]; // assign each thread an index so that threads in the same warp process // elements in the same column. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // assign each thread an index so that threads in the same warp process // elements in the same column. const uint col_i = threadIdx.y + 33 * threadIdx.x; // in order to process the matrix column-by-column... all warps must // finish initializing shared memory row-by-row. __syncthreads(); // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single column. // each thread in the warp computes the power of a single element. while (pow > 0) { // Results from number theory: Additive group of integers mod n is // generated by all integers m relatively prime to n. A warp conflict occurs // if two threads in a warp access the same address mod 32. We // minimize bank conflicts by reading and writing data to shared memory // with a stride m relatively prime to n. // // Even though we are reading data column-by-column, we don't have // bank conflicts since our stride is relatively prime to 32. // For larger matrices (size n), we should choose a stride that is // relatively prime to 32. It is useful to note that for any integer n, // gcd(n, n + 1) = 1. s_res[col_i] *= s_a[col_i]; pow -= 1; } // in order to read the matrix row-by-row... all warps must // finish initializing shared memory column-by-column. __syncthreads(); // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; };
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z8pow_rowsPKfjPf .globl _Z8pow_rowsPKfjPf .p2align 8 .type _Z8pow_rowsPKfjPf,@function _Z8pow_rowsPKfjPf: s_load_b32 s2, s[0:1], 0x8 v_lshrrev_b32_e32 v1, 5, v0 v_and_b32_e32 v0, 0x3ff, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v2, 1.0 :: v_dual_and_b32 v1, 0x7fe0, v1 v_add_nc_u32_e32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v1, 2, v0 s_waitcnt lgkmcnt(0) s_cmp_eq_u32 s2, 0 ds_store_b32 v1, v2 s_cbranch_scc1 .LBB0_4 s_load_b64 s[4:5], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 2, v0 s_waitcnt lgkmcnt(0) global_load_b32 v3, v2, s[4:5] ds_load_b32 v2, v1 .LBB0_2: s_waitcnt vmcnt(0) lgkmcnt(0) v_mul_f32_e32 v2, v3, v2 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB0_2 ds_store_b32 v1, v2 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x10 ds_load_b32 v1, v1 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8pow_rowsPKfjPf .amdhsa_group_segment_fixed_size 4096 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z8pow_rowsPKfjPf, .Lfunc_end0-_Z8pow_rowsPKfjPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z8pow_colsPKfjPf .globl _Z8pow_colsPKfjPf .p2align 8 .type _Z8pow_colsPKfjPf,@function _Z8pow_colsPKfjPf: s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s2, s[0:1], 0x8 v_dual_mov_b32 v6, 1.0 :: v_dual_and_b32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshl_add_u32 v2, v0, 5, v1 v_lshlrev_b32_e32 v4, 2, v2 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v3, 0x1000, v4 s_waitcnt lgkmcnt(0) global_load_b32 v5, v4, s[4:5] s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) ds_store_2addr_stride64_b32 v4, v5, v6 offset1:16 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB1_4 v_lshlrev_b32_e32 v1, 5, v1 s_delay_alu instid0(VALU_DEP_1) v_add_lshl_u32 v4, v1, v0, 2 ds_load_2addr_stride64_b32 v[0:1], v4 offset1:16 v_add_nc_u32_e32 v4, 0x1000, v4 .LBB1_2: s_waitcnt lgkmcnt(0) v_mul_f32_e32 v1, v0, v1 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB1_2 ds_store_b32 v4, v1 .LBB1_4: s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v0, v3 v_lshlrev_b32_e32 v1, 2, v2 s_waitcnt lgkmcnt(0) global_store_b32 v1, v0, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8pow_colsPKfjPf .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 
.amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z8pow_colsPKfjPf, .Lfunc_end1-_Z8pow_colsPKfjPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z12pow_cols_padPKfjPf .globl _Z12pow_cols_padPKfjPf .p2align 8 .type _Z12pow_cols_padPKfjPf,@function _Z12pow_cols_padPKfjPf: s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s2, s[0:1], 0x8 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshl_add_u32 v0, v3, 5, v2 v_lshlrev_b32_e32 v4, 2, v0 s_delay_alu instid0(VALU_DEP_1) v_dual_mov_b32 v6, 1.0 :: v_dual_add_nc_u32 v1, 0x1110, v4 s_waitcnt lgkmcnt(0) global_load_b32 v5, v4, s[4:5] s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) ds_store_b32 v4, v5 ds_store_b32 v4, v6 offset:4368 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB2_4 v_mul_u32_u24_e32 v2, 33, v2 s_delay_alu instid0(VALU_DEP_1) v_add_lshl_u32 v4, v2, v3, 2 ds_load_b32 v2, v4 ds_load_b32 
v3, v4 offset:4368 v_add_nc_u32_e32 v4, 0x1110, v4 .LBB2_2: s_waitcnt lgkmcnt(0) v_mul_f32_e32 v3, v2, v3 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB2_2 ds_store_b32 v4, v3 .LBB2_4: s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v1, v1 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12pow_cols_padPKfjPf .amdhsa_group_segment_fixed_size 8724 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z12pow_cols_padPKfjPf, .Lfunc_end2-_Z12pow_cols_padPKfjPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 4096 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8pow_rowsPKfjPf .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z8pow_rowsPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8pow_colsPKfjPf .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z8pow_colsPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8724 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12pow_cols_padPKfjPf .private_segment_fixed_size: 
0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z12pow_cols_padPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * Shared memory is necessary here because we are reading and writing * to memory many times... * * Note that __syncthreads is not needed here because each row in shared * memory is exclusively read and written to by a single warp. */ __global__ void pow_rows(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[32 * 32]; // store result in shared memory for fast writes. __shared__ float s_res[32 * 32]; // assign each thread an index so that threads in the same warp process // elements in the same row. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single row. // each thread in the warp computes the power of a single element. while (pow > 0) { s_res[row_i] *= s_a[row_i]; pow -= 1; } // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; }; /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * After reading the matrix a into local memory row by row, we * compute the power of each element on a column by column basis * in order to cause a bank conflict. * * Note that __syncthreads is necessary here because the same shared * memory is accessed by multiple warps. */ __global__ void pow_cols(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[32 * 32]; // store result in shared memory for fast writes. 
__shared__ float s_res[32 * 32]; // assign each thread an index so that threads in the same warp process // elements in the same row. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // in order to process the matrix column-by-column... all warps must // finish initializing shared memory row-by-row. __syncthreads(); // assign each thread an index so that threads in the same warp process // elements in the same column. const uint col_i = threadIdx.y + 32 * threadIdx.x; // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single column. // each thread in the warp computes the power of a single element. while (pow > 0) { // Note that col_i % 32 = threadIdx.y. // Since all threads in the same warp have the same threadIdx.y, this // is a 32-way bank conflict! s_res[col_i] *= s_a[col_i]; pow -= 1; } // in order to read the matrix row-by-row... all warps must // finish initializing shared memory column-by-column. __syncthreads(); // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; }; /** * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij * and stores in res_ij. * * After reading the matrix a into local memory row by row, we * compute the power of each element on a column by column basis. * Due to zero padding, we don't have a bank conflict. * * Note that __syncthreads is necessary here because the same shared * memory is accessed by multiple warps. */ __global__ void pow_cols_pad(const float *a, uint pow, float *res) { // store entire matrix in shared memory for fast reads. __shared__ float s_a[33 * 33]; // store result in shared memory for fast writes. 
__shared__ float s_res[33 * 33]; // assign each thread an index so that threads in the same warp process // elements in the same column. const uint row_i = threadIdx.x + 32 * threadIdx.y; // copy matrix from global memory to shared memory in a coalesced fashion. s_a[row_i] = a[row_i]; // intialize result as a matrix where each element is 1.0. s_res[row_i] = 1.0; // assign each thread an index so that threads in the same warp process // elements in the same column. const uint col_i = threadIdx.y + 33 * threadIdx.x; // in order to process the matrix column-by-column... all warps must // finish initializing shared memory row-by-row. __syncthreads(); // a single block computes the power of the entire matrix. // each warp in the block computes the power of a single column. // each thread in the warp computes the power of a single element. while (pow > 0) { // Results from number theory: Additive group of integers mod n is // generated by all integers m relatively prime to n. A warp conflict occurs // if two threads in a warp access the same address mod 32. We // minimize bank conflicts by reading and writing data to shared memory // with a stride m relatively prime to n. // // Even though we are reading data column-by-column, we don't have // bank conflicts since our stride is relatively prime to 32. // For larger matrices (size n), we should choose a stride that is // relatively prime to 32. It is useful to note that for any integer n, // gcd(n, n + 1) = 1. s_res[col_i] *= s_a[col_i]; pow -= 1; } // in order to read the matrix row-by-row... all warps must // finish initializing shared memory column-by-column. __syncthreads(); // copy result from shared memory to global memory in a coalesced fashion. res[row_i] = s_res[row_i]; };
.text .file "example.hip" .globl _Z23__device_stub__pow_rowsPKfjPf # -- Begin function _Z23__device_stub__pow_rowsPKfjPf .p2align 4, 0x90 .type _Z23__device_stub__pow_rowsPKfjPf,@function _Z23__device_stub__pow_rowsPKfjPf: # @_Z23__device_stub__pow_rowsPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8pow_rowsPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z23__device_stub__pow_rowsPKfjPf, .Lfunc_end0-_Z23__device_stub__pow_rowsPKfjPf .cfi_endproc # -- End function .globl _Z23__device_stub__pow_colsPKfjPf # -- Begin function _Z23__device_stub__pow_colsPKfjPf .p2align 4, 0x90 .type _Z23__device_stub__pow_colsPKfjPf,@function _Z23__device_stub__pow_colsPKfjPf: # @_Z23__device_stub__pow_colsPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8pow_colsPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z23__device_stub__pow_colsPKfjPf, .Lfunc_end1-_Z23__device_stub__pow_colsPKfjPf 
.cfi_endproc # -- End function .globl _Z27__device_stub__pow_cols_padPKfjPf # -- Begin function _Z27__device_stub__pow_cols_padPKfjPf .p2align 4, 0x90 .type _Z27__device_stub__pow_cols_padPKfjPf,@function _Z27__device_stub__pow_cols_padPKfjPf: # @_Z27__device_stub__pow_cols_padPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12pow_cols_padPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z27__device_stub__pow_cols_padPKfjPf, .Lfunc_end2-_Z27__device_stub__pow_cols_padPKfjPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8pow_rowsPKfjPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8pow_colsPKfjPf, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) 
movups %xmm0, (%rsp) movl $_Z12pow_cols_padPKfjPf, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z8pow_rowsPKfjPf,@object # @_Z8pow_rowsPKfjPf .section .rodata,"a",@progbits .globl _Z8pow_rowsPKfjPf .p2align 3, 0x0 _Z8pow_rowsPKfjPf: .quad _Z23__device_stub__pow_rowsPKfjPf .size _Z8pow_rowsPKfjPf, 8 .type _Z8pow_colsPKfjPf,@object # @_Z8pow_colsPKfjPf .globl _Z8pow_colsPKfjPf .p2align 3, 0x0 _Z8pow_colsPKfjPf: .quad _Z23__device_stub__pow_colsPKfjPf .size _Z8pow_colsPKfjPf, 8 .type _Z12pow_cols_padPKfjPf,@object # @_Z12pow_cols_padPKfjPf .globl _Z12pow_cols_padPKfjPf .p2align 3, 0x0 _Z12pow_cols_padPKfjPf: .quad _Z27__device_stub__pow_cols_padPKfjPf .size _Z12pow_cols_padPKfjPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8pow_rowsPKfjPf" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z8pow_colsPKfjPf" .size .L__unnamed_2, 18 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z12pow_cols_padPKfjPf" .size .L__unnamed_3, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 
# 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__pow_rowsPKfjPf .addrsig_sym _Z23__device_stub__pow_colsPKfjPf .addrsig_sym _Z27__device_stub__pow_cols_padPKfjPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8pow_rowsPKfjPf .addrsig_sym _Z8pow_colsPKfjPf .addrsig_sym _Z12pow_cols_padPKfjPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12pow_cols_padPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R4, R5, 0x5 ; /* 0x0000000504007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0003 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe20003f05270 */ /*0090*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */ /* 0x000fce00000001ff */ /*00a0*/ STS [R0.X4+0x1104], R7 ; /* 0x0011040700007388 */ /* 0x0001e80000004800 */ /*00b0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e80000004800 */ /*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00d0*/ @!P0 BRA 0x4d0 ; /* 0x000003f000008947 */ /* 0x000fea0003800000 */ /*00e0*/ IMAD R2, R5, 0x21, R4 ; /* 0x0000002105027824 */ /* 0x001fe200078e0204 */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fc80000000f00 */ /*0100*/ LDS R3, [R2.X4] ; /* 0x0000000002037984 */ /* 0x0000620000004800 */ /*0110*/ IADD3 R4, R5.reuse, -0x1, RZ ; /* 0xffffffff05047810 */ /* 0x040fe40007ffe0ff */ /*0120*/ LOP3.LUT P1, R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe2000782c0ff */ /*0130*/ LDS R6, [R2.X4+0x1104] ; /* 0x0011040002067984 */ /* 0x0000a20000004800 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ 
/*0150*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fd20000000f00 */ /*0160*/ @!P1 BRA 0x1d0 ; /* 0x0000006000009947 */ /* 0x000fea0003800000 */ /*0170*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x001fe40000000f00 */ /*0180*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe20007ffe0ff */ /*0190*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*01a0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*01c0*/ @P1 BRA 0x180 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01d0*/ @!P0 BRA 0x4c0 ; /* 0x000002e000008947 */ /* 0x001fea0003800000 */ /*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f04270 */ /*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fe40003f24270 */ /*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0240*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */ /* 0x000fc60007ffe0ff */ /*0260*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fc60003f24270 */ /*0280*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02d0*/ 
FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0350*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */ /* 0x000fda0003f24270 */ /*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0390*/ FMUL R6, R3.reuse, R6 ; /* 0x0000000603067220 */ /* 0x046fe20000400000 */ /*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */ /* 0x000fe20007ffe0ff */ /*03c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0420*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe40000400000 */ /*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */ /* 0x000fda0000705670 */ /*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */ /* 0x000fe20007ffe0ff */ /*0460*/ 
FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fc60000400000 */ /*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0480*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04c0*/ STS [R2.X4+0x1104], R6 ; /* 0x0011040602007388 */ /* 0x0041e40000004800 */ /*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*04e0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04f0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x002fe200000f14ff */ /*0500*/ LDS R5, [R0.X4+0x1104] ; /* 0x0011040000057984 */ /* 0x000e280000004800 */ /*0510*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z8pow_colsPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R4, R5, 0x5 ; /* 0x0000000504007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0003 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe20003f05270 */ /*0090*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */ /* 0x000fce00000001ff */ /*00a0*/ STS [R0.X4+0x1000], R7 ; /* 0x0010000700007388 */ /* 0x0001e80000004800 */ /*00b0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e80000004800 */ /*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00d0*/ @!P0 BRA 0x4d0 ; /* 0x000003f000008947 */ /* 0x000fea0003800000 */ /*00e0*/ LEA R2, R5, R4, 0x5 ; /* 0x0000000405027211 */ /* 0x001fe400078e28ff */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fc60000000f00 */ /*0100*/ LDS R3, [R2.X4] ; /* 0x0000000002037984 */ /* 0x0000620000004800 */ /*0110*/ IADD3 R4, R5.reuse, -0x1, RZ ; /* 0xffffffff05047810 */ /* 0x040fe40007ffe0ff */ /*0120*/ LOP3.LUT P1, R5, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305057812 */ /* 0x000fe2000782c0ff */ /*0130*/ LDS R6, [R2.X4+0x1000] ; /* 0x0010000002067984 */ /* 0x0000a20000004800 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*0150*/ MOV R4, 
c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x000fd20000000f00 */ /*0160*/ @!P1 BRA 0x1d0 ; /* 0x0000006000009947 */ /* 0x000fea0003800000 */ /*0170*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */ /* 0x001fe40000000f00 */ /*0180*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe20007ffe0ff */ /*0190*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*01a0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*01b0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f25270 */ /*01c0*/ @P1 BRA 0x180 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01d0*/ @!P0 BRA 0x4c0 ; /* 0x000002e000008947 */ /* 0x001fea0003800000 */ /*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f04270 */ /*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fe40003f24270 */ /*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0240*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x006fe20000400000 */ /*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */ /* 0x000fc60007ffe0ff */ /*0260*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */ /* 0x000fc60003f24270 */ /*0280*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02d0*/ FMUL R6, R3, R6 ; 
/* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0350*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe20000400000 */ /*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */ /* 0x000fda0003f24270 */ /*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0390*/ FMUL R6, R3.reuse, R6 ; /* 0x0000000603067220 */ /* 0x046fe20000400000 */ /*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */ /* 0x000fe20007ffe0ff */ /*03c0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0420*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fe40000400000 */ /*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */ /* 0x000fda0000705670 */ /*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */ /* 0x000fe20007ffe0ff */ /*0460*/ FMUL R6, R3, R6 ; /* 
0x0000000603067220 */ /* 0x006fc60000400000 */ /*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0480*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04a0*/ FMUL R6, R3, R6 ; /* 0x0000000603067220 */ /* 0x000fc80000400000 */ /*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04c0*/ STS [R2.X4+0x1000], R6 ; /* 0x0010000602007388 */ /* 0x0041e40000004800 */ /*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*04e0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04f0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x002fe200000f14ff */ /*0500*/ LDS R5, [R0.X4+0x1000] ; /* 0x0010000000057984 */ /* 0x000e280000004800 */ /*0510*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*0520*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0530*/ BRA 0x530; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z8pow_rowsPKfjPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002200 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ LEA R0, R0, R3, 0x5 ; /* 0x0000000300007211 */ /* 0x001fca00078e28ff */ /*0060*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0005 */ /*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0080*/ HFMA2.MMA R5, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff057435 */ /* 0x000fe200000001ff */ /*0090*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */ /* 0x000fe40003f05270 */ /*00a0*/ MOV R4, 0x3f800000 ; /* 0x3f80000000047802 */ /* 0x000fc80000000f00 */ /*00b0*/ STS [R0.X4+0x1000], R5 ; /* 0x0010000500007388 */ /* 0x0001e80000004800 */ /*00c0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0041e60000004800 */ /*00d0*/ @!P0 BRA 0x4c0 ; /* 0x000003e000008947 */ /* 0x000fea0003800000 */ /*00e0*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe40000000f00 */ /*00f0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x001fe40000000f00 */ /*0100*/ IADD3 R4, R2, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x000fc40007ffe0ff */ /*0110*/ LOP3.LUT P1, R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */ /* 0x000fe4000782c0ff */ /*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*0130*/ MOV R4, 0x3f800000 ; /* 0x3f80000000047802 */ /* 0x000fd20000000f00 */ /*0140*/ @!P1 BRA 0x1c0 ; /* 0x0000007000009947 */ /* 0x000fea0003800000 */ /*0150*/ HFMA2.MMA R4, -RZ, RZ, 1.875, 0 ; /* 
0x3f800000ff047435 */ /* 0x000fe200000001ff */ /*0160*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */ /* 0x000fd00000000f00 */ /*0170*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe20007ffe0ff */ /*0180*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe20000400000 */ /*0190*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fe40007ffe0ff */ /*01a0*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fda0003f25270 */ /*01b0*/ @P1 BRA 0x170 ; /* 0xffffffb000001947 */ /* 0x000fea000383ffff */ /*01c0*/ @!P0 BRA 0x4b0 ; /* 0x000002e000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f04270 */ /*01e0*/ @!P0 BRA 0x440 ; /* 0x0000025000008947 */ /* 0x000fea0003800000 */ /*01f0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fe40003f24270 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0210*/ @!P1 BRA 0x360 ; /* 0x0000014000009947 */ /* 0x000fea0003800000 */ /*0220*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0230*/ FMUL R2, R3, R4 ; /* 0x0000000403027220 */ /* 0x000fe20000400000 */ /*0240*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */ /* 0x000fc60007ffe0ff */ /*0250*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fe20000400000 */ /*0260*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fc60003f24270 */ /*0270*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0280*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0290*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02a0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02b0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02c0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02d0*/ FMUL R2, R3, R2 ; /* 
0x0000000203027220 */ /* 0x000fc80000400000 */ /*02e0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*02f0*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0300*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0310*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0320*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0330*/ FMUL R2, R3, R2 ; /* 0x0000000203027220 */ /* 0x000fc80000400000 */ /*0340*/ FMUL R4, R3, R2 ; /* 0x0000000203047220 */ /* 0x000fe20000400000 */ /*0350*/ @P1 BRA 0x230 ; /* 0xfffffed000001947 */ /* 0x000fea000383ffff */ /*0360*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */ /* 0x000fda0003f24270 */ /*0370*/ @!P1 BRA 0x420 ; /* 0x000000a000009947 */ /* 0x000fea0003800000 */ /*0380*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe20000400000 */ /*0390*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*03a0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */ /* 0x000fe20007ffe0ff */ /*03b0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03c0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03d0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03e0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*03f0*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0400*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0410*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fe40000400000 */ /*0420*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */ /* 0x000fda0000705670 */ /*0430*/ @!P0 BRA 0x4b0 ; /* 0x0000007000008947 */ /* 0x000fea0003800000 */ /*0440*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */ /* 0x000fe20007ffe0ff */ /*0450*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc60000400000 */ /*0460*/ ISETP.NE.AND P0, PT, R5, RZ, 
PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0470*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0480*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*0490*/ FMUL R4, R3, R4 ; /* 0x0000000403047220 */ /* 0x000fc80000400000 */ /*04a0*/ @P0 BRA 0x440 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*04b0*/ STS [R0.X4+0x1000], R4 ; /* 0x0010000400007388 */ /* 0x0001e40000004800 */ /*04c0*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */ /* 0x000fc800078010ff */ /*04d0*/ LEA.HI.X R3, R0, c[0x0][0x174], RZ, 0x2, P0 ; /* 0x00005d0000037a11 */ /* 0x001fca00000f14ff */ /*04e0*/ STG.E [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x000fe2000c101904 */ /*04f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0500*/ BRA 0x500; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0510*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0520*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0530*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z8pow_rowsPKfjPf .globl _Z8pow_rowsPKfjPf .p2align 8 .type _Z8pow_rowsPKfjPf,@function _Z8pow_rowsPKfjPf: s_load_b32 s2, s[0:1], 0x8 v_lshrrev_b32_e32 v1, 5, v0 v_and_b32_e32 v0, 0x3ff, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v2, 1.0 :: v_dual_and_b32 v1, 0x7fe0, v1 v_add_nc_u32_e32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v1, 2, v0 s_waitcnt lgkmcnt(0) s_cmp_eq_u32 s2, 0 ds_store_b32 v1, v2 s_cbranch_scc1 .LBB0_4 s_load_b64 s[4:5], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 2, v0 s_waitcnt lgkmcnt(0) global_load_b32 v3, v2, s[4:5] ds_load_b32 v2, v1 .LBB0_2: s_waitcnt vmcnt(0) lgkmcnt(0) v_mul_f32_e32 v2, v3, v2 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB0_2 ds_store_b32 v1, v2 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x10 ds_load_b32 v1, v1 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8pow_rowsPKfjPf .amdhsa_group_segment_fixed_size 4096 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z8pow_rowsPKfjPf, .Lfunc_end0-_Z8pow_rowsPKfjPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z8pow_colsPKfjPf .globl _Z8pow_colsPKfjPf .p2align 8 .type _Z8pow_colsPKfjPf,@function _Z8pow_colsPKfjPf: s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s2, s[0:1], 0x8 v_dual_mov_b32 v6, 1.0 :: v_dual_and_b32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshl_add_u32 v2, v0, 5, v1 v_lshlrev_b32_e32 v4, 2, v2 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v3, 0x1000, v4 s_waitcnt lgkmcnt(0) global_load_b32 v5, v4, s[4:5] s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) ds_store_2addr_stride64_b32 v4, v5, v6 offset1:16 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB1_4 v_lshlrev_b32_e32 v1, 5, v1 s_delay_alu instid0(VALU_DEP_1) v_add_lshl_u32 v4, v1, v0, 2 ds_load_2addr_stride64_b32 v[0:1], v4 offset1:16 v_add_nc_u32_e32 v4, 0x1000, v4 .LBB1_2: s_waitcnt lgkmcnt(0) v_mul_f32_e32 v1, v0, v1 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB1_2 ds_store_b32 v4, v1 .LBB1_4: s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v0, v3 v_lshlrev_b32_e32 v1, 2, v2 s_waitcnt lgkmcnt(0) global_store_b32 v1, v0, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8pow_colsPKfjPf .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 
.amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z8pow_colsPKfjPf, .Lfunc_end1-_Z8pow_colsPKfjPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z12pow_cols_padPKfjPf .globl _Z12pow_cols_padPKfjPf .p2align 8 .type _Z12pow_cols_padPKfjPf,@function _Z12pow_cols_padPKfjPf: s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b32 s2, s[0:1], 0x8 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshl_add_u32 v0, v3, 5, v2 v_lshlrev_b32_e32 v4, 2, v0 s_delay_alu instid0(VALU_DEP_1) v_dual_mov_b32 v6, 1.0 :: v_dual_add_nc_u32 v1, 0x1110, v4 s_waitcnt lgkmcnt(0) global_load_b32 v5, v4, s[4:5] s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) ds_store_b32 v4, v5 ds_store_b32 v4, v6 offset:4368 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB2_4 v_mul_u32_u24_e32 v2, 33, v2 s_delay_alu instid0(VALU_DEP_1) v_add_lshl_u32 v4, v2, v3, 2 ds_load_b32 v2, v4 ds_load_b32 
v3, v4 offset:4368 v_add_nc_u32_e32 v4, 0x1110, v4 .LBB2_2: s_waitcnt lgkmcnt(0) v_mul_f32_e32 v3, v2, v3 s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_cbranch_scc1 .LBB2_2 ds_store_b32 v4, v3 .LBB2_4: s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v1, v1 v_lshlrev_b32_e32 v0, 2, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12pow_cols_padPKfjPf .amdhsa_group_segment_fixed_size 8724 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z12pow_cols_padPKfjPf, .Lfunc_end2-_Z12pow_cols_padPKfjPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section 
.bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 4096 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8pow_rowsPKfjPf .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z8pow_rowsPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8pow_colsPKfjPf .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z8pow_colsPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8724 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12pow_cols_padPKfjPf .private_segment_fixed_size: 
0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z12pow_cols_padPKfjPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001966bc_00000000-6_example.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf .type _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf, @function _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8pow_rowsPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf, .-_Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf .globl _Z8pow_rowsPKfjPf .type _Z8pow_rowsPKfjPf, @function _Z8pow_rowsPKfjPf: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8pow_rowsPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z8pow_rowsPKfjPf, .-_Z8pow_rowsPKfjPf .globl _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf .type _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf, @function _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf: .LFB2053: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8pow_colsPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf, .-_Z31__device_stub__Z8pow_colsPKfjPfPKfjPf .globl _Z8pow_colsPKfjPf .type _Z8pow_colsPKfjPf, @function _Z8pow_colsPKfjPf: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8pow_colsPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _Z8pow_colsPKfjPf, .-_Z8pow_colsPKfjPf .globl _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf .type _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf, @function _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf: .LFB2055: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax 
movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12pow_cols_padPKfjPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf, .-_Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf .globl _Z12pow_cols_padPKfjPf .type _Z12pow_cols_padPKfjPf, @function _Z12pow_cols_padPKfjPf: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12pow_cols_padPKfjPfPKfjPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size _Z12pow_cols_padPKfjPf, .-_Z12pow_cols_padPKfjPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12pow_cols_padPKfjPf" .LC1: .string "_Z8pow_colsPKfjPf" .LC2: .string "_Z8pow_rowsPKfjPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12pow_cols_padPKfjPf(%rip), %rsi movq %rax, %rdi call 
__cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8pow_colsPKfjPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z8pow_rowsPKfjPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "example.hip" .globl _Z23__device_stub__pow_rowsPKfjPf # -- Begin function _Z23__device_stub__pow_rowsPKfjPf .p2align 4, 0x90 .type _Z23__device_stub__pow_rowsPKfjPf,@function _Z23__device_stub__pow_rowsPKfjPf: # @_Z23__device_stub__pow_rowsPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8pow_rowsPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z23__device_stub__pow_rowsPKfjPf, .Lfunc_end0-_Z23__device_stub__pow_rowsPKfjPf .cfi_endproc # -- End function .globl _Z23__device_stub__pow_colsPKfjPf # -- Begin function _Z23__device_stub__pow_colsPKfjPf .p2align 4, 0x90 .type _Z23__device_stub__pow_colsPKfjPf,@function _Z23__device_stub__pow_colsPKfjPf: # @_Z23__device_stub__pow_colsPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8pow_colsPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z23__device_stub__pow_colsPKfjPf, .Lfunc_end1-_Z23__device_stub__pow_colsPKfjPf 
.cfi_endproc # -- End function .globl _Z27__device_stub__pow_cols_padPKfjPf # -- Begin function _Z27__device_stub__pow_cols_padPKfjPf .p2align 4, 0x90 .type _Z27__device_stub__pow_cols_padPKfjPf,@function _Z27__device_stub__pow_cols_padPKfjPf: # @_Z27__device_stub__pow_cols_padPKfjPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12pow_cols_padPKfjPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z27__device_stub__pow_cols_padPKfjPf, .Lfunc_end2-_Z27__device_stub__pow_cols_padPKfjPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8pow_rowsPKfjPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8pow_colsPKfjPf, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) 
movups %xmm0, (%rsp) movl $_Z12pow_cols_padPKfjPf, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z8pow_rowsPKfjPf,@object # @_Z8pow_rowsPKfjPf .section .rodata,"a",@progbits .globl _Z8pow_rowsPKfjPf .p2align 3, 0x0 _Z8pow_rowsPKfjPf: .quad _Z23__device_stub__pow_rowsPKfjPf .size _Z8pow_rowsPKfjPf, 8 .type _Z8pow_colsPKfjPf,@object # @_Z8pow_colsPKfjPf .globl _Z8pow_colsPKfjPf .p2align 3, 0x0 _Z8pow_colsPKfjPf: .quad _Z23__device_stub__pow_colsPKfjPf .size _Z8pow_colsPKfjPf, 8 .type _Z12pow_cols_padPKfjPf,@object # @_Z12pow_cols_padPKfjPf .globl _Z12pow_cols_padPKfjPf .p2align 3, 0x0 _Z12pow_cols_padPKfjPf: .quad _Z27__device_stub__pow_cols_padPKfjPf .size _Z12pow_cols_padPKfjPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8pow_rowsPKfjPf" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z8pow_colsPKfjPf" .size .L__unnamed_2, 18 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z12pow_cols_padPKfjPf" .size .L__unnamed_3, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 
# 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__pow_rowsPKfjPf .addrsig_sym _Z23__device_stub__pow_colsPKfjPf .addrsig_sym _Z27__device_stub__pow_cols_padPKfjPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8pow_rowsPKfjPf .addrsig_sym _Z8pow_colsPKfjPf .addrsig_sym _Z12pow_cols_padPKfjPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* Template code for convolution. CS6023, IITM */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>

#define W 1024     /* Input DIM */
#define OW (W - 4) /* Output DIM */
#define D 8        /* Input and Kernel Depth */
#define T 5        /* Kernel DIM */
#define N 128      /* Number of kernels */

/* Abort with file/line context when a CUDA runtime call fails. */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/* Fill the W x W x D input volume with a deterministic byte pattern. */
void fillMatrix(unsigned char *matrix) {
    unsigned char (*m)[W][D] = (unsigned char (*)[W][D])matrix;
    for (int i = 0; i < W; i++)
        for (int j = 0; j < W; j++)
            for (int k = 0; k < D; k++)
                m[i][j][k] = (i * j + j * k + i * k + i * 2 + j * 3 + k * 4) % 255;
}

/* Fill the N filter banks (T x T x D each) with deterministic fractional values. */
void fillKernel(float *kernel) {
    float (*t)[T][T][D] = (float (*)[T][T][D])kernel;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < T; j++)
            for (int k = 0; k < T; k++)
                for (int l = 0; l < D; l++)
                    t[i][j][k][l] = fmod(-(i + 1) * 2.1 + (j + 1) * 3.2 -
                                         (k + 1) * 4.8 + (l + 1) * 7.1, 1.0);
}

/* Dump the N x OW x OW result to "assignment4_out", one filter per line. */
void print_matrix_to_file(float *m) {
    const char *fname = "assignment4_out";
    FILE *f = fopen(fname, "w");
    if (f == NULL) { /* fopen can fail (permissions, disk full, ...) */
        fprintf(stderr, "cannot open %s for writing\n", fname);
        exit(EXIT_FAILURE);
    }
    float (*mat)[OW][OW] = (float (*)[OW][OW])m;
    for (unsigned i = 0; i < N; i++) {
        for (unsigned j = 0; j < OW; j++)
            for (unsigned k = 0; k < OW; k++)
                fprintf(f, "%4f ", mat[i][j][k]);
        fprintf(f, "\n");
    }
    fclose(f);
}

/*
 * Depth-reducing 5x5 convolution ("valid" output, border of 2 trimmed).
 * Expected launch layout: grid = dim3(N, W), block = W threads, where
 *   blockIdx.x -> output filter, blockIdx.y -> input row, threadIdx.x -> column.
 * Each j iteration stages one input row in shared memory, then every interior
 * thread accumulates its partial sum into the output with atomicAdd.
 * PRECONDITION: `output` must be zero-initialized before the launch, since
 * the kernel only ever adds into it.
 */
__global__ void conv(unsigned char *matrix, float *tile, float *output) {
    int filter = blockIdx.x;
    int eX = blockIdx.y;
    int eY = threadIdx.x;
    unsigned char (*m)[W][D] = (unsigned char (*)[W][D])matrix;
    float (*t)[T][T][D] = (float (*)[T][T][D])tile;
    float (*o)[OW][OW] = (float (*)[OW][OW])output;
    __shared__ unsigned char slice[W][D];
    float psum;
    /* eX is uniform across the block, so this early exit never splits a
       block around the __syncthreads() calls below. */
    if (eX < 2 || eX > W - 3) return;
    for (int j = 0; j < T; j++) {
        for (int i = 0; i < D; i++)
            slice[eY][i] = m[eX + j - 2][eY][i];
        __syncthreads(); /* row fully staged before anyone reads it */
        psum = 0.0f;
        if (!(eY < 2 || eY > W - 3)) {
            for (int k = 0; k < T; k++)
                for (int l = 0; l < D; l++)
                    psum += t[filter][j][k][l] * slice[eY + k - 2][l];
            atomicAdd(&o[filter][eX - 2][eY - 2], psum);
        }
        __syncthreads(); /* don't overwrite slice while it is still in use */
    }
}

int main() {
    unsigned char *matrix = (unsigned char *)malloc(sizeof(unsigned char) * W * W * D);
    float *kernel = (float *)malloc(sizeof(float) * T * T * D * N);
    float *output = (float *)malloc(sizeof(float) * N * OW * OW);
    if (matrix == NULL || kernel == NULL || output == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    fillMatrix(matrix);
    fillKernel(kernel);

    unsigned char *Dmatrix;
    CUDA_CHECK(cudaMalloc(&Dmatrix, sizeof(unsigned char) * W * W * D));
    float *Dkernel;
    CUDA_CHECK(cudaMalloc(&Dkernel, sizeof(float) * N * T * T * D));
    float *Doutput;
    CUDA_CHECK(cudaMalloc(&Doutput, sizeof(float) * N * OW * OW));

    CUDA_CHECK(cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char) * W * W * D,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(Dkernel, kernel, sizeof(float) * T * T * D * N,
                          cudaMemcpyHostToDevice));
    /* BUG FIX: cudaMalloc does not zero memory, and conv() accumulates with
       atomicAdd, so the output buffer must start at 0.0f (all-zero bytes). */
    CUDA_CHECK(cudaMemset(Doutput, 0, sizeof(float) * N * OW * OW));

    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    float milliseconds = 0;
    CUDA_CHECK(cudaEventRecord(start, 0));

    /* Make your cuda kernel call: one block per (filter, row) pair. */
    conv<<<dim3(N, W), W>>>(Dmatrix, Dkernel, Doutput);
    CUDA_CHECK(cudaGetLastError());      /* catch bad launch configuration */
    CUDA_CHECK(cudaDeviceSynchronize()); /* catch asynchronous kernel faults */

    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start, stop));
    printf("%f\n", milliseconds);

    CUDA_CHECK(cudaMemcpy(output, Doutput, sizeof(float) * N * OW * OW,
                          cudaMemcpyDeviceToHost));
    /* Use print_matrix_to_file function only */
    print_matrix_to_file(output);

    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    CUDA_CHECK(cudaFree(Dmatrix));
    CUDA_CHECK(cudaFree(Dkernel));
    CUDA_CHECK(cudaFree(Doutput));
    free(matrix);
    free(kernel);
    free(output);
    return 0;
}
code for sm_80 Function : _Z4convPhPfS0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e240000002600 */ /*0020*/ IADD3 R9, R9, -0x2, RZ ; /* 0xfffffffe09097810 */ /* 0x001fc80007ffe0ff */ /*0030*/ ISETP.GT.U32.AND P0, PT, R9, 0x3fb, PT ; /* 0x000003fb0900780c */ /* 0x000fda0003f04070 */ /*0040*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0060*/ HFMA2.MMA R3, -RZ, RZ, 3.755092620849609375e-06, -3.814697265625e-06 ; /* 0x003f8040ff037435 */ /* 0x000fe200000001ff */ /*0070*/ MOV R7, 0x320 ; /* 0x0000032000077802 */ /* 0x000fe20000000f00 */ /*0080*/ HFMA2.MMA R22, -RZ, RZ, 0, 0 ; /* 0x00000000ff167435 */ /* 0x000fe200000001ff */ /*0090*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e620000002500 */ /*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00b0*/ IMAD.WIDE R4, R0.reuse, 0x8, RZ ; /* 0x0000000800047825 */ /* 0x041fe200078e02ff */ /*00c0*/ IADD3 R11, R0, -0x2, RZ ; /* 0xfffffffe000b7810 */ /* 0x000fc80007ffe0ff */ /*00d0*/ IMAD.WIDE R2, R6, R3, c[0x0][0x170] ; /* 0x00005c0006027625 */ /* 0x002fe200078e0203 */ /*00e0*/ ISETP.GT.U32.AND P0, PT, R11, 0x3fb, PT ; /* 0x000003fb0b00780c */ /* 0x000fc60003f04070 */ /*00f0*/ IMAD.WIDE R4, R9, 0x2000, R4 ; /* 0x0000200009047825 */ /* 0x000fc800078e0204 */ /*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fe200078e0207 */ /*0110*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fc60007f3e0ff */ /*0120*/ IMAD.WIDE R2, R9, 0xff0, R2 ; /* 0x00000ff009027825 */ /* 0x000fe200078e0202 */ /*0130*/ IADD3 R6, P3, R6, 0x50, RZ ; /* 0x0000005006067810 */ /* 0x000fe40007f7e0ff */ /*0140*/ IADD3 R4, P2, R4, 0x3, RZ ; /* 
0x0000000304047810 */ /* 0x000fe40007f5e0ff */ /*0150*/ IADD3.X R23, RZ, R7, RZ, P3, !PT ; /* 0x00000007ff177210 */ /* 0x000fe20001ffe4ff */ /*0160*/ IMAD.WIDE R2, R11, 0x4, R2 ; /* 0x000000040b027825 */ /* 0x000fe200078e0202 */ /*0170*/ IADD3.X R5, RZ, c[0x0][0x164], R5, P2, P1 ; /* 0x00005900ff057a10 */ /* 0x000fca00017e2405 */ /*0180*/ LDG.E.U8 R7, [R4.64+-0x3] ; /* 0xfffffd0404077981 */ /* 0x000ea8000c1e1100 */ /*0190*/ LDG.E.U8 R9, [R4.64+-0x2] ; /* 0xfffffe0404097981 */ /* 0x000ee8000c1e1100 */ /*01a0*/ LDG.E.U8 R11, [R4.64+-0x1] ; /* 0xffffff04040b7981 */ /* 0x001f28000c1e1100 */ /*01b0*/ LDG.E.U8 R13, [R4.64] ; /* 0x00000004040d7981 */ /* 0x000f68000c1e1100 */ /*01c0*/ LDG.E.U8 R15, [R4.64+0x1] ; /* 0x00000104040f7981 */ /* 0x000f68000c1e1100 */ /*01d0*/ LDG.E.U8 R17, [R4.64+0x2] ; /* 0x0000020404117981 */ /* 0x000f68000c1e1100 */ /*01e0*/ LDG.E.U8 R19, [R4.64+0x3] ; /* 0x0000030404137981 */ /* 0x000f68000c1e1100 */ /*01f0*/ LDG.E.U8 R21, [R4.64+0x4] ; /* 0x0000040404157981 */ /* 0x000f62000c1e1100 */ /*0200*/ BSSY B0, 0xce0 ; /* 0x00000ad000007945 */ /* 0x000fe60003800000 */ /*0210*/ STS.U8 [R0.X8], R7 ; /* 0x0000000700007388 */ /* 0x0041e80000008000 */ /*0220*/ STS.U8 [R0.X8+0x1], R9 ; /* 0x0000010900007388 */ /* 0x0083e80000008000 */ /*0230*/ STS.U8 [R0.X8+0x2], R11 ; /* 0x0000020b00007388 */ /* 0x0103e20000008000 */ /*0240*/ MOV R7, R23 ; /* 0x0000001700077202 */ /* 0x001fc60000000f00 */ /*0250*/ STS.U8 [R0.X8+0x3], R13 ; /* 0x0000030d00007388 */ /* 0x0203e80000008000 */ /*0260*/ STS.U8 [R0.X8+0x4], R15 ; /* 0x0000040f00007388 */ /* 0x0003e80000008000 */ /*0270*/ STS.U8 [R0.X8+0x5], R17 ; /* 0x0000051100007388 */ /* 0x0003e80000008000 */ /*0280*/ STS.U8 [R0.X8+0x6], R19 ; /* 0x0000061300007388 */ /* 0x0003e80000008000 */ /*0290*/ STS.U8 [R0.X8+0x7], R21 ; /* 0x0000071500007388 */ /* 0x0003e80000008000 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @P0 BRA 0xcd0 ; /* 0x00000a1000000947 */ /* 
0x000fea0003800000 */ /*02c0*/ LDG.E R26, [R6.64+-0x50] ; /* 0xffffb004061a7981 */ /* 0x002ea8000c1e1900 */ /*02d0*/ LDG.E R16, [R6.64+-0x4c] ; /* 0xffffb40406107981 */ /* 0x000ee8000c1e1900 */ /*02e0*/ LDG.E R14, [R6.64+-0x48] ; /* 0xffffb804060e7981 */ /* 0x000f28000c1e1900 */ /*02f0*/ LDG.E R13, [R6.64+-0x44] ; /* 0xffffbc04060d7981 */ /* 0x000f68000c1e1900 */ /*0300*/ LDG.E R12, [R6.64+-0x40] ; /* 0xffffc004060c7981 */ /* 0x000f68000c1e1900 */ /*0310*/ LDG.E R9, [R6.64+-0x3c] ; /* 0xffffc40406097981 */ /* 0x000f68000c1e1900 */ /*0320*/ LDG.E R8, [R6.64+-0x38] ; /* 0xffffc80406087981 */ /* 0x000f68000c1e1900 */ /*0330*/ LDG.E R10, [R6.64+-0x34] ; /* 0xffffcc04060a7981 */ /* 0x000f68000c1e1900 */ /*0340*/ LDG.E R11, [R6.64+-0x30] ; /* 0xffffd004060b7981 */ /* 0x000f68000c1e1900 */ /*0350*/ LDG.E R23, [R6.64+-0x2c] ; /* 0xffffd40406177981 */ /* 0x000f68000c1e1900 */ /*0360*/ LDG.E R19, [R6.64+-0x28] ; /* 0xffffd80406137981 */ /* 0x000f68000c1e1900 */ /*0370*/ LDS.U8 R17, [R0.X8+-0x10] ; /* 0xfffff00000117984 */ /* 0x000e280000008000 */ /*0380*/ LDG.E R20, [R6.64+-0x24] ; /* 0xffffdc0406147981 */ /* 0x000f68000c1e1900 */ /*0390*/ LDS.U8 R27, [R0.X8+-0xf] ; /* 0xfffff100001b7984 */ /* 0x000e680000008000 */ /*03a0*/ LDS.U8 R15, [R0.X8+-0xe] ; /* 0xfffff200000f7984 */ /* 0x000e680000008000 */ /*03b0*/ LDG.E R21, [R6.64+-0x20] ; /* 0xffffe00406157981 */ /* 0x000f68000c1e1900 */ /*03c0*/ LDS.U8 R24, [R0.X8+-0xd] ; /* 0xfffff30000187984 */ /* 0x000e680000008000 */ /*03d0*/ LDG.E R18, [R6.64+-0x1c] ; /* 0xffffe40406127981 */ /* 0x000f68000c1e1900 */ /*03e0*/ LDS.U8 R28, [R0.X8+-0x9] ; /* 0xfffff700001c7984 */ /* 0x000e620000008000 */ /*03f0*/ I2F.U16 R17, R17 ; /* 0x0000001100117306 */ /* 0x001eb00000101000 */ /*0400*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x002ef00000101000 */ /*0410*/ I2F.U16 R15, R15 ; /* 0x0000000f000f7306 */ /* 0x000f300000101000 */ /*0420*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000f700000101000 */ /*0430*/ I2F.U16 R28, R28 ; /* 
0x0000001c001c7306 */ /* 0x000fe20000101000 */ /*0440*/ FFMA R25, R17, R26, RZ ; /* 0x0000001a11197223 */ /* 0x004fc400000000ff */ /*0450*/ LDG.E R17, [R6.64+-0x14] ; /* 0xffffec0406117981 */ /* 0x000ea4000c1e1900 */ /*0460*/ FFMA R26, R27, R16, R25 ; /* 0x000000101b1a7223 */ /* 0x008fe40000000019 */ /*0470*/ LDS.U8 R25, [R0.X8+-0xc] ; /* 0xfffff40000197984 */ /* 0x000e280000008000 */ /*0480*/ LDG.E R16, [R6.64+-0x18] ; /* 0xffffe80406107981 */ /* 0x000ee2000c1e1900 */ /*0490*/ FFMA R14, R15, R14, R26 ; /* 0x0000000e0f0e7223 */ /* 0x010fc6000000001a */ /*04a0*/ LDS.U8 R26, [R0.X8+-0xb] ; /* 0xfffff500001a7984 */ /* 0x000e680000008000 */ /*04b0*/ LDS.U8 R27, [R0.X8+-0xa] ; /* 0xfffff600001b7984 */ /* 0x000f280000008000 */ /*04c0*/ LDG.E R15, [R6.64+-0x10] ; /* 0xfffff004060f7981 */ /* 0x000ea2000c1e1900 */ /*04d0*/ FFMA R13, R24, R13, R14 ; /* 0x0000000d180d7223 */ /* 0x020fc6000000000e */ /*04e0*/ LDG.E R14, [R6.64+-0xc] ; /* 0xfffff404060e7981 */ /* 0x000f68000c1e1900 */ /*04f0*/ LDS.U8 R24, [R0.X8+-0x8] ; /* 0xfffff80000187984 */ /* 0x000f220000008000 */ /*0500*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x001e300000101000 */ /*0510*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x002e620000101000 */ /*0520*/ FFMA R13, R25, R12, R13 ; /* 0x0000000c190d7223 */ /* 0x001fc4000000000d */ /*0530*/ LDG.E R12, [R6.64+-0x8] ; /* 0xfffff804060c7981 */ /* 0x000f64000c1e1900 */ /*0540*/ FFMA R9, R26, R9, R13 ; /* 0x000000091a097223 */ /* 0x002fe4000000000d */ /*0550*/ LDG.E R13, [R6.64+-0x4] ; /* 0xfffffc04060d7981 */ /* 0x000f62000c1e1900 */ /*0560*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x010e240000101000 */ /*0570*/ FFMA R25, R27, R8, R9 ; /* 0x000000081b197223 */ /* 0x001fe40000000009 */ /*0580*/ LDG.E R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000f28000c1e1900 */ /*0590*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000e220000101000 */ /*05a0*/ LDG.E R9, [R6.64+0x4] ; /* 0x0000040406097981 */ /* 0x000f22000c1e1900 */ /*05b0*/ FFMA R26, R28, R10, 
R25 ; /* 0x0000000a1c1a7223 */ /* 0x000fc60000000019 */ /*05c0*/ LDG.E R10, [R6.64+0x8] ; /* 0x00000804060a7981 */ /* 0x000f28000c1e1900 */ /*05d0*/ LDS.U8 R25, [R0.X8+-0x7] ; /* 0xfffff90000197984 */ /* 0x000e680000008000 */ /*05e0*/ LDS.U8 R27, [R0.X8+-0x6] ; /* 0xfffffa00001b7984 */ /* 0x000e620000008000 */ /*05f0*/ FFMA R26, R24, R11, R26 ; /* 0x0000000b181a7223 */ /* 0x001fc6000000001a */ /*0600*/ LDG.E R11, [R6.64+0xc] ; /* 0x00000c04060b7981 */ /* 0x000f28000c1e1900 */ /*0610*/ LDS.U8 R28, [R0.X8+-0x5] ; /* 0xfffffb00001c7984 */ /* 0x000e220000008000 */ /*0620*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e640000101000 */ /*0630*/ FFMA R26, R25, R23, R26 ; /* 0x00000017191a7223 */ /* 0x002fe4000000001a */ /*0640*/ LDS.U8 R23, [R0.X8+-0x4] ; /* 0xfffffc0000177984 */ /* 0x000e680000008000 */ /*0650*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x000e300000101000 */ /*0660*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x001e220000101000 */ /*0670*/ FFMA R29, R27, R19, R26 ; /* 0x000000131b1d7223 */ /* 0x000fc4000000001a */ /*0680*/ LDG.E R19, [R6.64+0x10] ; /* 0x0000100406137981 */ /* 0x000f24000c1e1900 */ /*0690*/ FFMA R20, R28, R20, R29 ; /* 0x000000141c147223 */ /* 0x001fe4000000001d */ /*06a0*/ LDS.U8 R29, [R0.X8+-0x2] ; /* 0xfffffe00001d7984 */ /* 0x000e220000008000 */ /*06b0*/ I2F.U16 R24, R23 ; /* 0x0000001700187306 */ /* 0x0022660000101000 */ /*06c0*/ LDS.U8 R23, [R0.X8+-0x3] ; /* 0xfffffd0000177984 */ /* 0x002e620000008000 */ /*06d0*/ FFMA R25, R24, R21, R20 ; /* 0x0000001518197223 */ /* 0x000fc60000000014 */ /*06e0*/ LDS.U8 R24, [R0.X8+-0x1] ; /* 0xffffff0000187984 */ /* 0x000e680000008000 */ /*06f0*/ LDG.E R20, [R6.64+0x14] ; /* 0x0000140406147981 */ /* 0x000f28000c1e1900 */ /*0700*/ LDS.U8 R21, [R0.X8] ; /* 0x0000000000157984 */ /* 0x000e620000008000 */ /*0710*/ I2F.U16 R27, R29 ; /* 0x0000001d001b7306 */ /* 0x0011e60000101000 */ /*0720*/ LDS.U8 R29, [R0.X8+0xb] ; /* 0x00000b00001d7984 */ /* 0x001fea0000008000 */ /*0730*/ I2F.U16 R26, 
R23 ; /* 0x00000017001a7306 */ /* 0x0020700000101000 */ /*0740*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000ea20000101000 */ /*0750*/ LDS.U8 R23, [R0.X8+0x4] ; /* 0x0000040000177984 */ /* 0x001fe20000008000 */ /*0760*/ FFMA R26, R26, R18, R25 ; /* 0x000000121a1a7223 */ /* 0x002fc60000000019 */ /*0770*/ LDS.U8 R25, [R0.X8+0x1] ; /* 0x0000010000197984 */ /* 0x000e280000008000 */ /*0780*/ LDG.E R18, [R6.64+0x18] ; /* 0x0000180406127981 */ /* 0x000f22000c1e1900 */ /*0790*/ I2F.U16 R21, R21 ; /* 0x0000001500157306 */ /* 0x000e620000101000 */ /*07a0*/ FFMA R16, R27, R16, R26 ; /* 0x000000101b107223 */ /* 0x008fe4000000001a */ /*07b0*/ LDS.U8 R26, [R0.X8+0x2] ; /* 0x00000200001a7984 */ /* 0x000ee40000008000 */ /*07c0*/ FFMA R28, R24, R17, R16 ; /* 0x00000011181c7223 */ /* 0x004fc40000000010 */ /*07d0*/ LDS.U8 R27, [R0.X8+0x3] ; /* 0x00000300001b7984 */ /* 0x000ea80000008000 */ /*07e0*/ LDG.E R16, [R6.64+0x1c] ; /* 0x00001c0406107981 */ /* 0x000f22000c1e1900 */ /*07f0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x001f620000101000 */ /*0800*/ FFMA R17, R21, R15, R28 ; /* 0x0000000f15117223 */ /* 0x002fe4000000001c */ /*0810*/ LDG.E R15, [R6.64+0x20] ; /* 0x00002004060f7981 */ /* 0x000f28000c1e1900 */ /*0820*/ LDS.U8 R24, [R0.X8+0x5] ; /* 0x0000050000187984 */ /* 0x000e280000008000 */ /*0830*/ LDS.U8 R28, [R0.X8+0x7] ; /* 0x00000700001c7984 */ /* 0x000fe20000008000 */ /*0840*/ I2F.U16 R23, R23 ; /* 0x0000001700177306 */ /* 0x000fe60000101000 */ /*0850*/ LDG.E R21, [R6.64+0x3c] ; /* 0x00003c0406157981 */ /* 0x000f22000c1e1900 */ /*0860*/ FFMA R17, R25, R14, R17 ; /* 0x0000000e19117223 */ /* 0x020fc60000000011 */ /*0870*/ LDG.E R14, [R6.64+0x24] ; /* 0x00002404060e7981 */ /* 0x000f68000c1e1900 */ /*0880*/ LDS.U8 R25, [R0.X8+0x6] ; /* 0x0000060000197984 */ /* 0x000e620000008000 */ /*0890*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x008ef00000101000 */ /*08a0*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x004ea20000101000 */ /*08b0*/ FFMA R12, R26, R12, 
R17 ; /* 0x0000000c1a0c7223 */ /* 0x008fc40000000011 */ /*08c0*/ LDG.E R17, [R6.64+0x28] ; /* 0x0000280406117981 */ /* 0x000ee4000c1e1900 */ /*08d0*/ FFMA R13, R27, R13, R12 ; /* 0x0000000d1b0d7223 */ /* 0x004fe4000000000c */ /*08e0*/ LDG.E R12, [R6.64+0x2c] ; /* 0x00002c04060c7981 */ /* 0x000ea2000c1e1900 */ /*08f0*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x001e300000101000 */ /*0900*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e620000101000 */ /*0910*/ FFMA R26, R23, R8, R13 ; /* 0x00000008171a7223 */ /* 0x010fc4000000000d */ /*0920*/ LDG.E R8, [R6.64+0x30] ; /* 0x0000300406087981 */ /* 0x000f28000c1e1900 */ /*0930*/ LDG.E R13, [R6.64+0x34] ; /* 0x00003404060d7981 */ /* 0x000f22000c1e1900 */ /*0940*/ FFMA R27, R24, R9, R26 ; /* 0x00000009181b7223 */ /* 0x001fc6000000001a */ /*0950*/ LDG.E R9, [R6.64+0x38] ; /* 0x0000380406097981 */ /* 0x000f22000c1e1900 */ /*0960*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x000e260000101000 */ /*0970*/ LDG.E R23, [R6.64+0x40] ; /* 0x0000400406177981 */ /* 0x000f22000c1e1900 */ /*0980*/ FFMA R26, R25, R10, R27 ; /* 0x0000000a191a7223 */ /* 0x002fc6000000001b */ /*0990*/ LDG.E R10, [R6.64+0x44] ; /* 0x00004404060a7981 */ /* 0x000f28000c1e1900 */ /*09a0*/ LDG.E R24, [R6.64+0x48] ; /* 0x0000480406187981 */ /* 0x000f28000c1e1900 */ /*09b0*/ LDS.U8 R25, [R0.X8+0x8] ; /* 0x0000080000197984 */ /* 0x000e620000008000 */ /*09c0*/ FFMA R26, R28, R11, R26 ; /* 0x0000000b1c1a7223 */ /* 0x001fc6000000001a */ /*09d0*/ LDG.E R11, [R6.64+0x4c] ; /* 0x00004c04060b7981 */ /* 0x000f28000c1e1900 */ /*09e0*/ LDS.U8 R28, [R0.X8+0xa] ; /* 0x00000a00001c7984 */ /* 0x000e220000008000 */ /*09f0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e640000101000 */ /*0a00*/ FFMA R19, R25, R19, R26 ; /* 0x0000001319137223 */ /* 0x002fe4000000001a */ /*0a10*/ LDS.U8 R26, [R0.X8+0x9] ; /* 0x00000900001a7984 */ /* 0x000e680000008000 */ /*0a20*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x001fe20000101000 */ /*0a30*/ LDS.U8 R25, 
[R0.X8+0x10] ; /* 0x0000100000197984 */ /* 0x000fee0000008000 */ /*0a40*/ I2F.U16 R29, R29 ; /* 0x0000001d001d7306 */ /* 0x000ff00000101000 */ /*0a50*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x002e240000101000 */ /*0a60*/ FFMA R27, R26, R20, R19 ; /* 0x000000141a1b7223 */ /* 0x001fc40000000013 */ /*0a70*/ LDS.U8 R19, [R0.X8+0xc] ; /* 0x00000c0000137984 */ /* 0x000e280000008000 */ /*0a80*/ LDS.U8 R20, [R0.X8+0xe] ; /* 0x00000e0000147984 */ /* 0x000e620000008000 */ /*0a90*/ FFMA R27, R28, R18, R27 ; /* 0x000000121c1b7223 */ /* 0x000fc6000000001b */ /*0aa0*/ LDS.U8 R18, [R0.X8+0xd] ; /* 0x00000d0000127984 */ /* 0x000f680000008000 */ /*0ab0*/ LDS.U8 R28, [R0.X8+0x13] ; /* 0x00001300001c7984 */ /* 0x000fe20000008000 */ /*0ac0*/ I2F.U16 R26, R19 ; /* 0x00000013001a7306 */ /* 0x001e300000101000 */ /*0ad0*/ I2F.U16 R20, R20 ; /* 0x0000001400147306 */ /* 0x002fe20000101000 */ /*0ae0*/ FFMA R27, R29, R16, R27 ; /* 0x000000101d1b7223 */ /* 0x000fc4000000001b */ /*0af0*/ LDS.U8 R16, [R0.X8+0xf] ; /* 0x00000f0000107984 */ /* 0x000e6a0000008000 */ /*0b00*/ I2F.U16 R18, R18 ; /* 0x0000001200127306 */ /* 0x020f620000101000 */ /*0b10*/ FFMA R15, R26, R15, R27 ; /* 0x0000000f1a0f7223 */ /* 0x001fe4000000001b */ /*0b20*/ LDS.U8 R26, [R0.X8+0x11] ; /* 0x00001100001a7984 */ /* 0x000e280000008000 */ /*0b30*/ LDS.U8 R27, [R0.X8+0x12] ; /* 0x00001200001b7984 */ /* 0x000e220000008000 */ /*0b40*/ FFMA R14, R18, R14, R15 ; /* 0x0000000e120e7223 */ /* 0x020fc6000000000f */ /*0b50*/ LDS.U8 R15, [R0.X8+0x15] ; /* 0x00001500000f7984 */ /* 0x000fe20000008000 */ /*0b60*/ I2F.U16 R16, R16 ; /* 0x0000001000107306 */ /* 0x002ea20000101000 */ /*0b70*/ FFMA R29, R20, R17, R14 ; /* 0x00000011141d7223 */ /* 0x008fe4000000000e */ /*0b80*/ LDS.U8 R17, [R0.X8+0x14] ; /* 0x0000140000117984 */ /* 0x000e680000008000 */ /*0b90*/ LDS.U8 R14, [R0.X8+0x16] ; /* 0x00001600000e7984 */ /* 0x000ee20000008000 */ /*0ba0*/ FFMA R19, R16, R12, R29 ; /* 0x0000000c10137223 */ /* 0x004fc6000000001d */ /*0bb0*/ LDS.U8 
R12, [R0.X8+0x17] ; /* 0x00001700000c7984 */ /* 0x000ea20000008000 */ /*0bc0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x000f300000101000 */ /*0bd0*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x001e300000101000 */ /*0be0*/ I2F.U16 R18, R27 ; /* 0x0000001b00127306 */ /* 0x000f620000101000 */ /*0bf0*/ FFMA R8, R25, R8, R19 ; /* 0x0000000819087223 */ /* 0x010fce0000000013 */ /*0c00*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x000f220000101000 */ /*0c10*/ FFMA R8, R26, R13, R8 ; /* 0x0000000d1a087223 */ /* 0x001fce0000000008 */ /*0c20*/ I2F.U16 R16, R17 ; /* 0x0000001100107306 */ /* 0x002e220000101000 */ /*0c30*/ FFMA R8, R18, R9, R8 ; /* 0x0000000912087223 */ /* 0x020fce0000000008 */ /*0c40*/ I2F.U16 R15, R15 ; /* 0x0000000f000f7306 */ /* 0x000e620000101000 */ /*0c50*/ FFMA R8, R28, R21, R8 ; /* 0x000000151c087223 */ /* 0x010fce0000000008 */ /*0c60*/ I2F.U16 R14, R14 ; /* 0x0000000e000e7306 */ /* 0x008ee20000101000 */ /*0c70*/ FFMA R23, R16, R23, R8 ; /* 0x0000001710177223 */ /* 0x001fce0000000008 */ /*0c80*/ I2F.U16 R12, R12 ; /* 0x0000000c000c7306 */ /* 0x004e220000101000 */ /*0c90*/ FFMA R23, R15, R10, R23 ; /* 0x0000000a0f177223 */ /* 0x002fc80000000017 */ /*0ca0*/ FFMA R24, R14, R24, R23 ; /* 0x000000180e187223 */ /* 0x008fc80000000017 */ /*0cb0*/ FFMA R11, R12, R11, R24 ; /* 0x0000000b0c0b7223 */ /* 0x001fca0000000018 */ /*0cc0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R11 ; /* 0x0000000b0200798e */ /* 0x0001e4000c10e784 */ /*0cd0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x002fea0003800000 */ /*0ce0*/ IADD3 R22, R22, 0x1, RZ ; /* 0x0000000116167810 */ /* 0x000fe20007ffe0ff */ /*0cf0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0d00*/ IADD3 R4, P1, R4, 0x2000, RZ ; /* 0x0000200004047810 */ /* 0x000fe40007f3e0ff */ /*0d10*/ ISETP.NE.AND P3, PT, R22, 0x5, PT ; /* 0x000000051600780c */ /* 0x000fe40003f65270 */ /*0d20*/ IADD3 R6, P2, R6, 0xa0, RZ ; /* 0x000000a006067810 */ /* 0x000fe40007f5e0ff */ /*0d30*/ 
IADD3.X R5, RZ, R5, RZ, P1, !PT ; /* 0x00000005ff057210 */ /* 0x000fe40000ffe4ff */ /*0d40*/ IADD3.X R23, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff177210 */ /* 0x000fce00017fe4ff */ /*0d50*/ @P3 BRA 0x180 ; /* 0xfffff42000003947 */ /* 0x000fea000383ffff */ /*0d60*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0d70*/ BRA 0xd70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0d80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0da0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0db0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0dc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0dd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0de0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0df0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* Template code for convolution. CS6023, IITM */ #include<stdio.h> #include<cuda.h> #include<math.h> #define W 1024 // Input DIM #define OW (W-4) // Output DIM #define D 8 // Input and Kernel Depth #define T 5 // Kernel DIM #define N 128 // Number of kernels void fillMatrix(unsigned char *matrix){ unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; for(int i=0;i<W;i++){ for(int j=0;j<W;j++){ for(int k=0;k<D;k++){ m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255; } } } } void fillKernel(float *kernel){ float (*t)[T][T][D]=(float (*)[T][T][D])kernel; for(int i=0;i<N;i++){ for(int j=0;j<T;j++){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0); } } } } } void print_matrix_to_file(float *m){ const char *fname = "assignment4_out"; FILE *f = fopen(fname, "w"); float (*mat)[OW][OW]=(float (*)[OW][OW])m; for(unsigned i=0; i < N; i++) { for(unsigned j=0; j < OW; j++) for(unsigned k=0;k<OW;k++) fprintf(f,"%4f ", mat[i][j][k]); fprintf(f,"\n"); } fclose(f); } __global__ void conv(unsigned char *matrix,float *tile,float *output){ int filter=blockIdx.x; int eX=blockIdx.y; int eY=threadIdx.x; unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; float (*t)[T][T][D]=(float (*)[T][T][D])tile; float (*o)[OW][OW]=(float (*)[OW][OW])output; __shared__ unsigned char slice[W][D]; float psum; if(eX<2||eX>W-3) return; for(int j=0;j<T;j++){ for(int i=0;i<D;i++){ slice[eY][i]=m[(eX+j-2)][eY][i]; } __syncthreads(); psum=0.0f; if(!(eY<2||eY>W-3)){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ psum+=t[filter][j][k][l]*slice[eY+k-2][l]; } } atomicAdd(&o[filter][(eX-2)][eY-2],psum); } __syncthreads(); } } int main() { unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D); float *kernel=(float*)malloc(sizeof(float)*T*T*D*N); float *output=(float *)malloc(sizeof(float)*N*OW*OW); fillMatrix(matrix); fillKernel(kernel); unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D); float 
*Dkernel;cudaMalloc(&Dkernel,sizeof(float)*N*T*T*D); float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW); cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice); cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start,0); //Make your cuda kernel call conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput); cudaDeviceSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("%f\n",milliseconds); cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost); //Use print_matrix_to_file function only print_matrix_to_file(output); }
.file "tmpxft_00195afa_00000000-6_new.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10fillMatrixPh .type _Z10fillMatrixPh, @function _Z10fillMatrixPh: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 movq %rdi, %rbp movl $0, %r12d movl $0, %ebx jmp .L4 .L11: addl $1, %r8d addl %r11d, %r9d addq $8, %rsi cmpl $1024, %r8d je .L6 .L8: leal (%r10,%r8), %edi leaq -8(%rsi), %rcx movl %r9d, %edx .L5: movslq %edx, %rax imulq $-2139062143, %rax, %rax shrq $32, %rax addl %edx, %eax sarl $7, %eax movl %edx, %r13d sarl $31, %r13d subl %r13d, %eax movl %eax, %r13d sall $8, %r13d subl %eax, %r13d movl %edx, %eax subl %r13d, %eax movb %al, (%rcx) addl %edi, %edx addq $1, %rcx cmpq %rsi, %rcx jne .L5 jmp .L11 .L6: addl $1, %ebx addl $2, %r12d addq $8192, %rbp cmpl $1024, %ebx je .L3 .L4: leal 3(%rbx), %r11d leaq 8(%rbp), %rsi movl %r12d, %r9d movl $0, %r8d leal 4(%rbx), %r10d jmp .L8 .L3: popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z10fillMatrixPh, .-_Z10fillMatrixPh .globl _Z10fillKernelPf .type _Z10fillKernelPf, @function _Z10fillKernelPf: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp 
.cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %rdi, 40(%rsp) movl $-1, %r15d jmp .L13 .L25: movsd .LC2(%rip), %xmm1 movsd 8(%rsp), %xmm0 call fmod@PLT jmp .L15 .L18: subl $1, %r15d addq $800, 40(%rsp) cmpl $-129, %r15d je .L12 .L13: pxor %xmm0, %xmm0 cvtsi2sdl %r15d, %xmm0 mulsd .LC5(%rip), %xmm0 movsd %xmm0, 32(%rsp) movq 40(%rsp), %r14 movl $0, %r13d .L21: addl $1, %r13d pxor %xmm0, %xmm0 cvtsi2sdl %r13d, %xmm0 mulsd .LC4(%rip), %xmm0 addsd 32(%rsp), %xmm0 movsd %xmm0, 24(%rsp) movq %r14, %rbp movl $0, %r12d .L19: addl $1, %r12d pxor %xmm0, %xmm0 cvtsi2sdl %r12d, %xmm0 mulsd .LC3(%rip), %xmm0 movsd 24(%rsp), %xmm4 subsd %xmm0, %xmm4 movsd %xmm4, 16(%rsp) movl $1, %ebx .L16: pxor %xmm0, %xmm0 cvtsi2sdl %ebx, %xmm0 mulsd .LC0(%rip), %xmm0 addsd 16(%rsp), %xmm0 movsd %xmm0, 8(%rsp) fld1 fldl 8(%rsp) .L14: fprem fnstsw %ax testb $4, %ah jne .L14 fstp %st(1) fstpl (%rsp) movsd (%rsp), %xmm2 ucomisd %xmm2, %xmm2 jp .L25 .L15: pxor %xmm0, %xmm0 cvtsd2ss (%rsp), %xmm0 movss %xmm0, -4(%rbp,%rbx,4) addq $1, %rbx cmpq $9, %rbx jne .L16 addq $32, %rbp cmpl $5, %r12d jne .L19 addq $160, %r14 cmpl $5, %r13d jne .L21 jmp .L18 .L12: addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z10fillKernelPf, .-_Z10fillKernelPf .section .rodata.str1.1,"aMS",@progbits,1 .LC7: .string "w" .LC8: .string "assignment4_out" .LC9: .string "%4f " .LC10: .string "\n" .text .globl _Z20print_matrix_to_filePf .type _Z20print_matrix_to_filePf, @function _Z20print_matrix_to_filePf: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 
.cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r15 leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %r15, %r14 addq $532684800, %r15 leaq .LC9(%rip), %r13 .L27: leaq 4080(%r14), %rbp .L31: leaq -4080(%rbp), %rbx .L28: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r13, %rdx movl $2, %esi movq %r12, %rdi movl $1, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L28 addq $4080, %rbp leaq 4165680(%r14), %rax cmpq %rax, %rbp jne .L31 leaq .LC10(%rip), %rdx movl $2, %esi movq %r12, %rdi movl $0, %eax call __fprintf_chk@PLT addq $4161600, %r14 cmpq %r15, %r14 jne .L27 movq %r12, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z20print_matrix_to_filePf, .-_Z20print_matrix_to_filePf .globl _Z28__device_stub__Z4convPhPfS0_PhPfS0_ .type _Z28__device_stub__Z4convPhPfS0_PhPfS0_, @function _Z28__device_stub__Z4convPhPfS0_PhPfS0_: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L38 .L34: movq 120(%rsp), %rax subq %fs:40, %rax jne .L39 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 
152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4convPhPfS0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L34 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z28__device_stub__Z4convPhPfS0_PhPfS0_, .-_Z28__device_stub__Z4convPhPfS0_PhPfS0_ .globl _Z4convPhPfS0_ .type _Z4convPhPfS0_, @function _Z4convPhPfS0_: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4convPhPfS0_PhPfS0_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z4convPhPfS0_, .-_Z4convPhPfS0_ .section .rodata.str1.1 .LC12: .string "%f\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $80, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $8388608, %edi call malloc@PLT movq %rax, %r12 movl $102400, %edi call malloc@PLT movq %rax, %rbp movl $532684800, %edi call malloc@PLT movq %rax, %rbx movq %r12, %rdi call _Z10fillMatrixPh movq %rbp, %rdi call _Z10fillKernelPf leaq 8(%rsp), %rdi movl $8388608, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $102400, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $532684800, %esi call cudaMalloc@PLT movl $1, %ecx movl $8388608, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $102400, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 4(%rsp) movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movl $1024, 60(%rsp) movl $1, 64(%rsp) movl $128, 48(%rsp) movl $1024, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi 
call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L43: call cudaDeviceSynchronize@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movq 40(%rsp), %rdi call cudaEventSynchronize@PLT leaq 4(%rsp), %rdi movq 40(%rsp), %rdx movq 32(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 4(%rsp), %xmm0 leaq .LC12(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $2, %ecx movl $532684800, %edx movq 24(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq %rbx, %rdi call _Z20print_matrix_to_filePf movq 72(%rsp), %rax subq %fs:40, %rax jne .L47 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L46: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z4convPhPfS0_PhPfS0_ jmp .L43 .L47: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC13: .string "_Z4convPhPfS0_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC13(%rip), %rdx movq %rdx, %rcx leaq _Z4convPhPfS0_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section 
.nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 1717986918 .long 1075603046 .align 8 .LC2: .long 0 .long 1072693248 .align 8 .LC3: .long 858993459 .long 1075000115 .align 8 .LC4: .long -1717986918 .long 1074370969 .align 8 .LC5: .long -858993459 .long 1073794252 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* Template code for convolution. CS6023, IITM */ #include<stdio.h> #include<cuda.h> #include<math.h> #define W 1024 // Input DIM #define OW (W-4) // Output DIM #define D 8 // Input and Kernel Depth #define T 5 // Kernel DIM #define N 128 // Number of kernels void fillMatrix(unsigned char *matrix){ unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; for(int i=0;i<W;i++){ for(int j=0;j<W;j++){ for(int k=0;k<D;k++){ m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255; } } } } void fillKernel(float *kernel){ float (*t)[T][T][D]=(float (*)[T][T][D])kernel; for(int i=0;i<N;i++){ for(int j=0;j<T;j++){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0); } } } } } void print_matrix_to_file(float *m){ const char *fname = "assignment4_out"; FILE *f = fopen(fname, "w"); float (*mat)[OW][OW]=(float (*)[OW][OW])m; for(unsigned i=0; i < N; i++) { for(unsigned j=0; j < OW; j++) for(unsigned k=0;k<OW;k++) fprintf(f,"%4f ", mat[i][j][k]); fprintf(f,"\n"); } fclose(f); } __global__ void conv(unsigned char *matrix,float *tile,float *output){ int filter=blockIdx.x; int eX=blockIdx.y; int eY=threadIdx.x; unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; float (*t)[T][T][D]=(float (*)[T][T][D])tile; float (*o)[OW][OW]=(float (*)[OW][OW])output; __shared__ unsigned char slice[W][D]; float psum; if(eX<2||eX>W-3) return; for(int j=0;j<T;j++){ for(int i=0;i<D;i++){ slice[eY][i]=m[(eX+j-2)][eY][i]; } __syncthreads(); psum=0.0f; if(!(eY<2||eY>W-3)){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ psum+=t[filter][j][k][l]*slice[eY+k-2][l]; } } atomicAdd(&o[filter][(eX-2)][eY-2],psum); } __syncthreads(); } } int main() { unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D); float *kernel=(float*)malloc(sizeof(float)*T*T*D*N); float *output=(float *)malloc(sizeof(float)*N*OW*OW); fillMatrix(matrix); fillKernel(kernel); unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D); float 
*Dkernel;cudaMalloc(&Dkernel,sizeof(float)*N*T*T*D); float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW); cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice); cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start,0); //Make your cuda kernel call conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput); cudaDeviceSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("%f\n",milliseconds); cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost); //Use print_matrix_to_file function only print_matrix_to_file(output); }
/* Template code for convolution. CS6023, IITM */ #include<stdio.h> #include<hip/hip_runtime.h> #include<math.h> #define W 1024 // Input DIM #define OW (W-4) // Output DIM #define D 8 // Input and Kernel Depth #define T 5 // Kernel DIM #define N 128 // Number of kernels void fillMatrix(unsigned char *matrix){ unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; for(int i=0;i<W;i++){ for(int j=0;j<W;j++){ for(int k=0;k<D;k++){ m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255; } } } } void fillKernel(float *kernel){ float (*t)[T][T][D]=(float (*)[T][T][D])kernel; for(int i=0;i<N;i++){ for(int j=0;j<T;j++){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0); } } } } } void print_matrix_to_file(float *m){ const char *fname = "assignment4_out"; FILE *f = fopen(fname, "w"); float (*mat)[OW][OW]=(float (*)[OW][OW])m; for(unsigned i=0; i < N; i++) { for(unsigned j=0; j < OW; j++) for(unsigned k=0;k<OW;k++) fprintf(f,"%4f ", mat[i][j][k]); fprintf(f,"\n"); } fclose(f); } __global__ void conv(unsigned char *matrix,float *tile,float *output){ int filter=blockIdx.x; int eX=blockIdx.y; int eY=threadIdx.x; unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; float (*t)[T][T][D]=(float (*)[T][T][D])tile; float (*o)[OW][OW]=(float (*)[OW][OW])output; __shared__ unsigned char slice[W][D]; float psum; if(eX<2||eX>W-3) return; for(int j=0;j<T;j++){ for(int i=0;i<D;i++){ slice[eY][i]=m[(eX+j-2)][eY][i]; } __syncthreads(); psum=0.0f; if(!(eY<2||eY>W-3)){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ psum+=t[filter][j][k][l]*slice[eY+k-2][l]; } } atomicAdd(&o[filter][(eX-2)][eY-2],psum); } __syncthreads(); } } int main() { unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D); float *kernel=(float*)malloc(sizeof(float)*T*T*D*N); float *output=(float *)malloc(sizeof(float)*N*OW*OW); fillMatrix(matrix); fillKernel(kernel); unsigned char *Dmatrix;hipMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D); float 
*Dkernel;hipMalloc(&Dkernel,sizeof(float)*N*T*T*D); float *Doutput;hipMalloc(&Doutput,sizeof(float)*N*OW*OW); hipMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,hipMemcpyHostToDevice); hipMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); //Make your cuda kernel call conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("%f\n",milliseconds); hipMemcpy(output, Doutput, sizeof(float)*N*OW*OW,hipMemcpyDeviceToHost); //Use print_matrix_to_file function only print_matrix_to_file(output); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* Template code for convolution. CS6023, IITM */ #include<stdio.h> #include<hip/hip_runtime.h> #include<math.h> #define W 1024 // Input DIM #define OW (W-4) // Output DIM #define D 8 // Input and Kernel Depth #define T 5 // Kernel DIM #define N 128 // Number of kernels void fillMatrix(unsigned char *matrix){ unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; for(int i=0;i<W;i++){ for(int j=0;j<W;j++){ for(int k=0;k<D;k++){ m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255; } } } } void fillKernel(float *kernel){ float (*t)[T][T][D]=(float (*)[T][T][D])kernel; for(int i=0;i<N;i++){ for(int j=0;j<T;j++){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0); } } } } } void print_matrix_to_file(float *m){ const char *fname = "assignment4_out"; FILE *f = fopen(fname, "w"); float (*mat)[OW][OW]=(float (*)[OW][OW])m; for(unsigned i=0; i < N; i++) { for(unsigned j=0; j < OW; j++) for(unsigned k=0;k<OW;k++) fprintf(f,"%4f ", mat[i][j][k]); fprintf(f,"\n"); } fclose(f); } __global__ void conv(unsigned char *matrix,float *tile,float *output){ int filter=blockIdx.x; int eX=blockIdx.y; int eY=threadIdx.x; unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; float (*t)[T][T][D]=(float (*)[T][T][D])tile; float (*o)[OW][OW]=(float (*)[OW][OW])output; __shared__ unsigned char slice[W][D]; float psum; if(eX<2||eX>W-3) return; for(int j=0;j<T;j++){ for(int i=0;i<D;i++){ slice[eY][i]=m[(eX+j-2)][eY][i]; } __syncthreads(); psum=0.0f; if(!(eY<2||eY>W-3)){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ psum+=t[filter][j][k][l]*slice[eY+k-2][l]; } } atomicAdd(&o[filter][(eX-2)][eY-2],psum); } __syncthreads(); } } int main() { unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D); float *kernel=(float*)malloc(sizeof(float)*T*T*D*N); float *output=(float *)malloc(sizeof(float)*N*OW*OW); fillMatrix(matrix); fillKernel(kernel); unsigned char *Dmatrix;hipMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D); float 
*Dkernel;hipMalloc(&Dkernel,sizeof(float)*N*T*T*D); float *Doutput;hipMalloc(&Doutput,sizeof(float)*N*OW*OW); hipMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,hipMemcpyHostToDevice); hipMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); //Make your cuda kernel call conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("%f\n",milliseconds); hipMemcpy(output, Doutput, sizeof(float)*N*OW*OW,hipMemcpyDeviceToHost); //Use print_matrix_to_file function only print_matrix_to_file(output); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4convPhPfS0_ .globl _Z4convPhPfS0_ .p2align 8 .type _Z4convPhPfS0_,@function _Z4convPhPfS0_: s_add_i32 s2, s15, 0xfffffc02 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lt_u32 s2, 0xfffffc04 s_cbranch_scc1 .LBB0_12 s_clause 0x1 s_load_b64 s[6:7], s[0:1], 0x10 s_load_b128 s[0:3], s[0:1], 0x0 v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, 0xfffffc02, v0 s_add_i32 s8, s15, -2 v_dual_mov_b32 v8, 0 :: v_dual_add_nc_u32 v1, -2, v0 s_mul_i32 s10, s14, 0x3f8040 s_mul_hi_i32 s9, s14, 0x3f8040 s_mul_hi_u32 s12, s8, 0xff0 s_mulk_i32 s8, 0xff0 s_mov_b32 s4, s15 s_mov_b32 s5, 0 v_lshlrev_b32_e32 v6, 3, v0 v_lshlrev_b64 v[0:1], 2, v[1:2] v_cmp_lt_u32_e32 vcc_lo, 0xfffffc03, v3 s_mul_i32 s11, s14, 0x320 s_delay_alu instid0(VALU_DEP_3) v_add_nc_u32_e32 v7, -16, v6 s_waitcnt lgkmcnt(0) s_add_u32 s6, s6, s10 s_addc_u32 s7, s7, s9 s_add_u32 s8, s6, s8 s_addc_u32 s9, s7, s12 s_lshl_b64 s[6:7], s[4:5], 13 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_add_u32 s0, s0, s6 s_addc_u32 s1, s1, s7 v_add_co_u32 v2, s0, s0, v6 v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v0, s0, s8, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_ci_u32_e64 v1, s0, s9, v1, s0 v_add_co_u32 v2, s0, v2, 0xffffc000 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, -1, v3, s0 s_mul_hi_i32 s0, s14, 0x320 s_add_u32 s2, s2, s11 s_addc_u32 s3, s3, s0 s_branch .LBB0_3 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 v_add_co_u32 v2, s0, v2, 0x2000 s_add_i32 s5, s5, 1 v_add_co_ci_u32_e64 v3, s0, 0, v3, s0 s_add_u32 s2, s2, 0xa0 s_addc_u32 s3, s3, 0 s_cmp_lg_u32 s5, 5 s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_12 .LBB0_3: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_mov_b32 s1, 0 .LBB0_4: global_load_u8 v9, v[4:5], off v_add_co_u32 v4, s0, v4, 1 v_add_nc_u32_e32 v10, s1, v6 v_add_co_ci_u32_e64 v5, s0, 0, v5, s0 s_add_i32 s1, s1, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s1, 8 
s_waitcnt vmcnt(0) ds_store_b8 v10, v9 s_cbranch_scc0 .LBB0_4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_and_saveexec_b32 s4, vcc_lo s_cbranch_execz .LBB0_2 v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v4, v7 s_mov_b32 s8, 0 s_mov_b64 s[0:1], s[2:3] .p2align 6 .LBB0_7: s_delay_alu instid0(SALU_CYCLE_1) s_mov_b64 s[6:7], s[0:1] s_mov_b32 s9, 0 .LBB0_8: global_load_b32 v5, v8, s[6:7] v_add_nc_u32_e32 v10, s9, v4 s_add_i32 s9, s9, 1 s_add_u32 s6, s6, 4 s_addc_u32 s7, s7, 0 s_cmp_eq_u32 s9, 8 ds_load_u8 v10, v10 s_waitcnt lgkmcnt(0) v_cvt_f32_ubyte0_e32 v10, v10 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_fmac_f32_e32 v9, v5, v10 s_cbranch_scc0 .LBB0_8 s_add_i32 s8, s8, 1 v_add_nc_u32_e32 v4, 8, v4 s_add_u32 s0, s0, 32 s_addc_u32 s1, s1, 0 s_cmp_eq_u32 s8, 5 s_cbranch_scc0 .LBB0_7 global_load_b32 v5, v[0:1], off s_mov_b32 s1, 0 .LBB0_11: s_waitcnt vmcnt(0) v_add_f32_e32 v4, v5, v9 global_atomic_cmpswap_b32 v4, v[0:1], v[4:5], off glc s_waitcnt vmcnt(0) v_cmp_eq_u32_e64 s0, v4, v5 v_mov_b32_e32 v5, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 s1, s0, s1 s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB0_11 s_branch .LBB0_2 .LBB0_12: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4convPhPfS0_ .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4convPhPfS0_, .Lfunc_end0-_Z4convPhPfS0_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4convPhPfS0_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4convPhPfS0_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* Template code for convolution. CS6023, IITM */ #include<stdio.h> #include<hip/hip_runtime.h> #include<math.h> #define W 1024 // Input DIM #define OW (W-4) // Output DIM #define D 8 // Input and Kernel Depth #define T 5 // Kernel DIM #define N 128 // Number of kernels void fillMatrix(unsigned char *matrix){ unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; for(int i=0;i<W;i++){ for(int j=0;j<W;j++){ for(int k=0;k<D;k++){ m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255; } } } } void fillKernel(float *kernel){ float (*t)[T][T][D]=(float (*)[T][T][D])kernel; for(int i=0;i<N;i++){ for(int j=0;j<T;j++){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0); } } } } } void print_matrix_to_file(float *m){ const char *fname = "assignment4_out"; FILE *f = fopen(fname, "w"); float (*mat)[OW][OW]=(float (*)[OW][OW])m; for(unsigned i=0; i < N; i++) { for(unsigned j=0; j < OW; j++) for(unsigned k=0;k<OW;k++) fprintf(f,"%4f ", mat[i][j][k]); fprintf(f,"\n"); } fclose(f); } __global__ void conv(unsigned char *matrix,float *tile,float *output){ int filter=blockIdx.x; int eX=blockIdx.y; int eY=threadIdx.x; unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix; float (*t)[T][T][D]=(float (*)[T][T][D])tile; float (*o)[OW][OW]=(float (*)[OW][OW])output; __shared__ unsigned char slice[W][D]; float psum; if(eX<2||eX>W-3) return; for(int j=0;j<T;j++){ for(int i=0;i<D;i++){ slice[eY][i]=m[(eX+j-2)][eY][i]; } __syncthreads(); psum=0.0f; if(!(eY<2||eY>W-3)){ for(int k=0;k<T;k++){ for(int l=0;l<D;l++){ psum+=t[filter][j][k][l]*slice[eY+k-2][l]; } } atomicAdd(&o[filter][(eX-2)][eY-2],psum); } __syncthreads(); } } int main() { unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D); float *kernel=(float*)malloc(sizeof(float)*T*T*D*N); float *output=(float *)malloc(sizeof(float)*N*OW*OW); fillMatrix(matrix); fillKernel(kernel); unsigned char *Dmatrix;hipMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D); float 
*Dkernel;hipMalloc(&Dkernel,sizeof(float)*N*T*T*D); float *Doutput;hipMalloc(&Doutput,sizeof(float)*N*OW*OW); hipMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,hipMemcpyHostToDevice); hipMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); //Make your cuda kernel call conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("%f\n",milliseconds); hipMemcpy(output, Doutput, sizeof(float)*N*OW*OW,hipMemcpyDeviceToHost); //Use print_matrix_to_file function only print_matrix_to_file(output); }
.text .file "new.hip" .globl _Z10fillMatrixPh # -- Begin function _Z10fillMatrixPh .p2align 4, 0x90 .type _Z10fillMatrixPh,@function _Z10fillMatrixPh: # @_Z10fillMatrixPh .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax movl $3, %ecx movl $4, %edx movl $2155905153, %esi # imm = 0x80808081 xorl %r8d, %r8d .p2align 4, 0x90 .LBB0_1: # %.preheader23 # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 # Child Loop BB0_3 Depth 3 movl %edx, %r9d movl %eax, %r10d movq %rdi, %r11 xorl %ebx, %ebx .p2align 4, 0x90 .LBB0_2: # %.preheader # Parent Loop BB0_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_3 Depth 3 movl %r10d, %ebp xorl %r14d, %r14d .p2align 4, 0x90 .LBB0_3: # Parent Loop BB0_1 Depth=1 # Parent Loop BB0_2 Depth=2 # => This Inner Loop Header: Depth=3 movl %ebp, %r15d imulq %rsi, %r15 shrq $39, %r15 addb %bpl, %r15b movb %r15b, (%r11,%r14) incq %r14 addl %r9d, %ebp cmpq $8, %r14 jne .LBB0_3 # %bb.4: # in Loop: Header=BB0_2 Depth=2 incq %rbx addq $8, %r11 addl %ecx, %r10d incl %r9d cmpq $1024, %rbx # imm = 0x400 jne .LBB0_2 # %bb.5: # in Loop: Header=BB0_1 Depth=1 incq %r8 addq $8192, %rdi # imm = 0x2000 addl $2, %eax incl %ecx incl %edx cmpq $1024, %r8 # imm = 0x400 jne .LBB0_1 # %bb.6: popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10fillMatrixPh, .Lfunc_end0-_Z10fillMatrixPh .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10fillKernelPf .LCPI1_0: .quad 0x4000cccccccccccd # double 2.1000000000000001 .LCPI1_1: .quad 0x400999999999999a # double 3.2000000000000002 .LCPI1_2: .quad 0xc013333333333333 # double -4.7999999999999998 .LCPI1_3: .quad 0x401c666666666666 # double 
7.0999999999999996 .LCPI1_4: .quad 0x3ff0000000000000 # double 1 .text .globl _Z10fillKernelPf .p2align 4, 0x90 .type _Z10fillKernelPf,@function _Z10fillKernelPf: # @_Z10fillKernelPf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero movsd .LCPI1_4(%rip), %xmm2 # xmm2 = mem[0],zero .p2align 4, 0x90 .LBB1_1: # %.preheader21 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 # Child Loop BB1_3 Depth 3 # Child Loop BB1_4 Depth 4 movq %rax, (%rsp) # 8-byte Spill # kill: def $eax killed $eax killed $rax notl %eax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI1_0(%rip), %xmm0 movsd %xmm0, 16(%rsp) # 8-byte Spill movq %rdi, 8(%rsp) # 8-byte Spill movq %rdi, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_2: # %.preheader20 # Parent Loop BB1_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_3 Depth 3 # Child Loop BB1_4 Depth 4 incq %r12 xorps %xmm0, %xmm0 cvtsi2sd %r12d, %xmm0 mulsd .LCPI1_1(%rip), %xmm0 addsd 16(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 24(%rsp) # 8-byte Spill movq %r15, %r13 xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_3: # %.preheader # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB1_4 Depth 4 incq %rbp xorps %xmm0, %xmm0 cvtsi2sd %ebp, %xmm0 mulsd .LCPI1_2(%rip), %xmm0 addsd 24(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 32(%rsp) # 8-byte Spill xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # Parent Loop BB1_3 Depth=3 # => This Inner Loop Header: Depth=4 leaq 1(%rbx), %r14 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd 
%xmm1, %xmm0 addsd 32(%rsp), %xmm0 # 8-byte Folded Reload movaps %xmm2, %xmm1 callq fmod movsd .LCPI1_4(%rip), %xmm2 # xmm2 = mem[0],zero movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r13,%rbx,4) movq %r14, %rbx cmpq $8, %r14 jne .LBB1_4 # %bb.5: # in Loop: Header=BB1_3 Depth=3 addq $32, %r13 cmpq $5, %rbp jne .LBB1_3 # %bb.6: # in Loop: Header=BB1_2 Depth=2 addq $160, %r15 cmpq $5, %r12 jne .LBB1_2 # %bb.7: # in Loop: Header=BB1_1 Depth=1 movq (%rsp), %rax # 8-byte Reload incq %rax movq 8(%rsp), %rdi # 8-byte Reload addq $800, %rdi # imm = 0x320 cmpq $128, %rax jne .LBB1_1 # %bb.8: addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z10fillKernelPf, .Lfunc_end1-_Z10fillKernelPf .cfi_endproc # -- End function .globl _Z20print_matrix_to_filePf # -- Begin function _Z20print_matrix_to_filePf .p2align 4, 0x90 .type _Z20print_matrix_to_filePf,@function _Z20print_matrix_to_filePf: # @_Z20print_matrix_to_filePf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdi, %rbx movl $.L.str, %edi movl $.L.str.1, %esi callq fopen movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_1: # %.preheader16 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 # Child Loop BB2_3 Depth 3 movq %rbx, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_2: # %.preheader # Parent Loop BB2_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_3 Depth 3 xorl %ebp, %ebp .p2align 4, 0x90 .LBB2_3: # Parent 
Loop BB2_1 Depth=1 # Parent Loop BB2_2 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r12,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.2, %esi movq %r14, %rdi movb $1, %al callq fprintf incq %rbp cmpq $1020, %rbp # imm = 0x3FC jne .LBB2_3 # %bb.4: # in Loop: Header=BB2_2 Depth=2 incq %r13 addq $4080, %r12 # imm = 0xFF0 cmpq $1020, %r13 # imm = 0x3FC jne .LBB2_2 # %bb.5: # in Loop: Header=BB2_1 Depth=1 movl $10, %edi movq %r14, %rsi callq fputc@PLT incq %r15 addq $4161600, %rbx # imm = 0x3F8040 cmpq $128, %r15 jne .LBB2_1 # %bb.6: movq %r14, %rdi addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end2: .size _Z20print_matrix_to_filePf, .Lfunc_end2-_Z20print_matrix_to_filePf .cfi_endproc # -- End function .globl _Z19__device_stub__convPhPfS0_ # -- Begin function _Z19__device_stub__convPhPfS0_ .p2align 4, 0x90 .type _Z19__device_stub__convPhPfS0_,@function _Z19__device_stub__convPhPfS0_: # @_Z19__device_stub__convPhPfS0_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4convPhPfS0_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z19__device_stub__convPhPfS0_, .Lfunc_end3-_Z19__device_stub__convPhPfS0_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # 
-- Begin function main .LCPI4_0: .quad 0x4000cccccccccccd # double 2.1000000000000001 .LCPI4_1: .quad 0x400999999999999a # double 3.2000000000000002 .LCPI4_2: .quad 0xc013333333333333 # double -4.7999999999999998 .LCPI4_3: .quad 0x401c666666666666 # double 7.0999999999999996 .LCPI4_4: .quad 0x3ff0000000000000 # double 1 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $8388608, %edi # imm = 0x800000 callq malloc movq %rax, %rbx movl $102400, %edi # imm = 0x19000 callq malloc movq %rax, %rbp movl $532684800, %edi # imm = 0x1FC02000 callq malloc xorl %r13d, %r13d movl $3, %ecx movl $4, %edx movl $2155905153, %esi # imm = 0x80808081 movq %rbx, 56(%rsp) # 8-byte Spill movq %rbx, %rdi xorl %r8d, %r8d .p2align 4, 0x90 .LBB4_1: # %.preheader23.i # =>This Loop Header: Depth=1 # Child Loop BB4_2 Depth 2 # Child Loop BB4_3 Depth 3 movl %edx, %r9d movl %r13d, %r10d movq %rdi, %r11 xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_2: # %.preheader.i # Parent Loop BB4_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_3 Depth 3 movl %r10d, %ebx xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_3: # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # => This Inner Loop Header: Depth=3 movl %ebx, %r12d imulq %rsi, %r12 shrq $39, %r12 addb %bl, %r12b movb %r12b, (%r11,%r15) incq %r15 addl %r9d, %ebx cmpq $8, %r15 jne .LBB4_3 # %bb.4: # in Loop: Header=BB4_2 Depth=2 incq %r14 addq $8, %r11 addl %ecx, %r10d incl %r9d cmpq $1024, %r14 # imm = 0x400 jne .LBB4_2 # %bb.5: # in Loop: Header=BB4_1 Depth=1 incq %r8 addq $8192, %rdi # imm = 0x2000 addl $2, %r13d incl %ecx incl 
%edx cmpq $1024, %r8 # imm = 0x400 jne .LBB4_1 # %bb.6: # %.preheader21.i.preheader movq %rax, 72(%rsp) # 8-byte Spill xorl %eax, %eax movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero movq %rbp, 64(%rsp) # 8-byte Spill movq %rbp, %r13 .p2align 4, 0x90 .LBB4_7: # %.preheader21.i # =>This Loop Header: Depth=1 # Child Loop BB4_8 Depth 2 # Child Loop BB4_9 Depth 3 # Child Loop BB4_10 Depth 4 movq %rax, 88(%rsp) # 8-byte Spill # kill: def $eax killed $eax killed $rax notl %eax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI4_0(%rip), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill movq %r13, 80(%rsp) # 8-byte Spill xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_8: # %.preheader20.i # Parent Loop BB4_7 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_9 Depth 3 # Child Loop BB4_10 Depth 4 incq %r14 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd .LCPI4_1(%rip), %xmm0 addsd 96(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 104(%rsp) # 8-byte Spill movq %r13, %rbp xorl %ebx, %ebx .p2align 4, 0x90 .LBB4_9: # %.preheader.i11 # Parent Loop BB4_7 Depth=1 # Parent Loop BB4_8 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB4_10 Depth 4 incq %rbx xorps %xmm0, %xmm0 cvtsi2sd %ebx, %xmm0 mulsd .LCPI4_2(%rip), %xmm0 addsd 104(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 112(%rsp) # 8-byte Spill xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_10: # Parent Loop BB4_7 Depth=1 # Parent Loop BB4_8 Depth=2 # Parent Loop BB4_9 Depth=3 # => This Inner Loop Header: Depth=4 leaq 1(%r15), %r12 xorps %xmm0, %xmm0 cvtsi2sd %r12d, %xmm0 mulsd %xmm1, %xmm0 addsd 112(%rsp), %xmm0 # 8-byte Folded Reload movaps %xmm2, %xmm1 callq fmod movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbp,%r15,4) movq %r12, %r15 cmpq $8, %r12 jne .LBB4_10 # %bb.11: # in Loop: Header=BB4_9 Depth=3 addq $32, %rbp cmpq $5, %rbx jne .LBB4_9 # %bb.12: # in Loop: Header=BB4_8 Depth=2 addq 
$160, %r13 cmpq $5, %r14 jne .LBB4_8 # %bb.13: # in Loop: Header=BB4_7 Depth=1 movq 88(%rsp), %rax # 8-byte Reload incq %rax movq 80(%rsp), %r13 # 8-byte Reload addq $800, %r13 # imm = 0x320 cmpq $128, %rax jne .LBB4_7 # %bb.14: # %_Z10fillKernelPf.exit leaq 48(%rsp), %rdi movl $8388608, %esi # imm = 0x800000 callq hipMalloc leaq 40(%rsp), %rdi movl $102400, %esi # imm = 0x19000 callq hipMalloc leaq 32(%rsp), %rdi movl $532684800, %esi # imm = 0x1FC02000 callq hipMalloc movq 48(%rsp), %rdi movl $8388608, %edx # imm = 0x800000 movq 56(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movl $102400, %edx # imm = 0x19000 movq 64(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy leaq 24(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movl $0, 12(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4398046511232, %rdi # imm = 0x40000000080 movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_16 # %bb.15: movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 184(%rsp) movq %rcx, 176(%rsp) movq %rdx, 168(%rsp) leaq 184(%rsp), %rax movq %rax, 192(%rsp) leaq 176(%rsp), %rax movq %rax, 200(%rsp) leaq 168(%rsp), %rax movq %rax, 208(%rsp) leaq 152(%rsp), %rdi leaq 136(%rsp), %rsi leaq 128(%rsp), %rdx leaq 120(%rsp), %rcx callq __hipPopCallConfiguration movq 152(%rsp), %rsi movl 160(%rsp), %edx movq 136(%rsp), %rcx movl 144(%rsp), %r8d leaq 192(%rsp), %r9 movl $_Z4convPhPfS0_, %edi pushq 120(%rsp) .cfi_adjust_cfa_offset 8 pushq 136(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_16: callq hipDeviceSynchronize movq 16(%rsp), %rdi xorl %r15d, %r15d xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movss 
12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.4, %edi movb $1, %al callq printf movq 32(%rsp), %rsi movl $532684800, %edx # imm = 0x1FC02000 movq 72(%rsp), %rbp # 8-byte Reload movq %rbp, %rdi movl $2, %ecx callq hipMemcpy movl $.L.str, %edi movl $.L.str.1, %esi callq fopen movq %rax, %r14 .p2align 4, 0x90 .LBB4_17: # %.preheader16.i # =>This Loop Header: Depth=1 # Child Loop BB4_18 Depth 2 # Child Loop BB4_19 Depth 3 movq %rbp, %rbx xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_18: # %.preheader.i17 # Parent Loop BB4_17 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_19 Depth 3 xorl %r13d, %r13d .p2align 4, 0x90 .LBB4_19: # Parent Loop BB4_17 Depth=1 # Parent Loop BB4_18 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rbx,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.2, %esi movq %r14, %rdi movb $1, %al callq fprintf incq %r13 cmpq $1020, %r13 # imm = 0x3FC jne .LBB4_19 # %bb.20: # in Loop: Header=BB4_18 Depth=2 incq %r12 addq $4080, %rbx # imm = 0xFF0 cmpq $1020, %r12 # imm = 0x3FC jne .LBB4_18 # %bb.21: # in Loop: Header=BB4_17 Depth=1 movl $10, %edi movq %r14, %rsi callq fputc@PLT incq %r15 addq $4161600, %rbp # imm = 0x3F8040 cmpq $128, %r15 jne .LBB4_17 # %bb.22: # %_Z20print_matrix_to_filePf.exit movq %r14, %rdi callq fclose xorl %eax, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, 
__hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4convPhPfS0_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "assignment4_out" .size .L.str, 16 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "w" .size .L.str.1, 2 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%4f " .size .L.str.2, 5 .type _Z4convPhPfS0_,@object # @_Z4convPhPfS0_ .section .rodata,"a",@progbits .globl _Z4convPhPfS0_ .p2align 3, 0x0 _Z4convPhPfS0_: .quad _Z19__device_stub__convPhPfS0_ .size _Z4convPhPfS0_, 8 .type .L.str.4,@object # @.str.4 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.4: .asciz "%f\n" .size .L.str.4, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4convPhPfS0_" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section 
.init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__convPhPfS0_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4convPhPfS0_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z4convPhPfS0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */ /* 0x000e240000002600 */ /*0020*/ IADD3 R9, R9, -0x2, RZ ; /* 0xfffffffe09097810 */ /* 0x001fc80007ffe0ff */ /*0030*/ ISETP.GT.U32.AND P0, PT, R9, 0x3fb, PT ; /* 0x000003fb0900780c */ /* 0x000fda0003f04070 */ /*0040*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0060*/ HFMA2.MMA R3, -RZ, RZ, 3.755092620849609375e-06, -3.814697265625e-06 ; /* 0x003f8040ff037435 */ /* 0x000fe200000001ff */ /*0070*/ MOV R7, 0x320 ; /* 0x0000032000077802 */ /* 0x000fe20000000f00 */ /*0080*/ HFMA2.MMA R22, -RZ, RZ, 0, 0 ; /* 0x00000000ff167435 */ /* 0x000fe200000001ff */ /*0090*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e620000002500 */ /*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00b0*/ IMAD.WIDE R4, R0.reuse, 0x8, RZ ; /* 0x0000000800047825 */ /* 0x041fe200078e02ff */ /*00c0*/ IADD3 R11, R0, -0x2, RZ ; /* 0xfffffffe000b7810 */ /* 0x000fc80007ffe0ff */ /*00d0*/ IMAD.WIDE R2, R6, R3, c[0x0][0x170] ; /* 0x00005c0006027625 */ /* 0x002fe200078e0203 */ /*00e0*/ ISETP.GT.U32.AND P0, PT, R11, 0x3fb, PT ; /* 0x000003fb0b00780c */ /* 0x000fc60003f04070 */ /*00f0*/ IMAD.WIDE R4, R9, 0x2000, R4 ; /* 0x0000200009047825 */ /* 0x000fc800078e0204 */ /*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fe200078e0207 */ /*0110*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fc60007f3e0ff */ /*0120*/ IMAD.WIDE R2, R9, 0xff0, R2 ; /* 0x00000ff009027825 */ /* 0x000fe200078e0202 */ /*0130*/ IADD3 R6, P3, R6, 0x50, RZ ; /* 0x0000005006067810 */ /* 0x000fe40007f7e0ff */ /*0140*/ IADD3 R4, P2, R4, 0x3, RZ ; /* 
0x0000000304047810 */ /* 0x000fe40007f5e0ff */ /*0150*/ IADD3.X R23, RZ, R7, RZ, P3, !PT ; /* 0x00000007ff177210 */ /* 0x000fe20001ffe4ff */ /*0160*/ IMAD.WIDE R2, R11, 0x4, R2 ; /* 0x000000040b027825 */ /* 0x000fe200078e0202 */ /*0170*/ IADD3.X R5, RZ, c[0x0][0x164], R5, P2, P1 ; /* 0x00005900ff057a10 */ /* 0x000fca00017e2405 */ /*0180*/ LDG.E.U8 R7, [R4.64+-0x3] ; /* 0xfffffd0404077981 */ /* 0x000ea8000c1e1100 */ /*0190*/ LDG.E.U8 R9, [R4.64+-0x2] ; /* 0xfffffe0404097981 */ /* 0x000ee8000c1e1100 */ /*01a0*/ LDG.E.U8 R11, [R4.64+-0x1] ; /* 0xffffff04040b7981 */ /* 0x001f28000c1e1100 */ /*01b0*/ LDG.E.U8 R13, [R4.64] ; /* 0x00000004040d7981 */ /* 0x000f68000c1e1100 */ /*01c0*/ LDG.E.U8 R15, [R4.64+0x1] ; /* 0x00000104040f7981 */ /* 0x000f68000c1e1100 */ /*01d0*/ LDG.E.U8 R17, [R4.64+0x2] ; /* 0x0000020404117981 */ /* 0x000f68000c1e1100 */ /*01e0*/ LDG.E.U8 R19, [R4.64+0x3] ; /* 0x0000030404137981 */ /* 0x000f68000c1e1100 */ /*01f0*/ LDG.E.U8 R21, [R4.64+0x4] ; /* 0x0000040404157981 */ /* 0x000f62000c1e1100 */ /*0200*/ BSSY B0, 0xce0 ; /* 0x00000ad000007945 */ /* 0x000fe60003800000 */ /*0210*/ STS.U8 [R0.X8], R7 ; /* 0x0000000700007388 */ /* 0x0041e80000008000 */ /*0220*/ STS.U8 [R0.X8+0x1], R9 ; /* 0x0000010900007388 */ /* 0x0083e80000008000 */ /*0230*/ STS.U8 [R0.X8+0x2], R11 ; /* 0x0000020b00007388 */ /* 0x0103e20000008000 */ /*0240*/ MOV R7, R23 ; /* 0x0000001700077202 */ /* 0x001fc60000000f00 */ /*0250*/ STS.U8 [R0.X8+0x3], R13 ; /* 0x0000030d00007388 */ /* 0x0203e80000008000 */ /*0260*/ STS.U8 [R0.X8+0x4], R15 ; /* 0x0000040f00007388 */ /* 0x0003e80000008000 */ /*0270*/ STS.U8 [R0.X8+0x5], R17 ; /* 0x0000051100007388 */ /* 0x0003e80000008000 */ /*0280*/ STS.U8 [R0.X8+0x6], R19 ; /* 0x0000061300007388 */ /* 0x0003e80000008000 */ /*0290*/ STS.U8 [R0.X8+0x7], R21 ; /* 0x0000071500007388 */ /* 0x0003e80000008000 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @P0 BRA 0xcd0 ; /* 0x00000a1000000947 */ /* 
0x000fea0003800000 */ /*02c0*/ LDG.E R26, [R6.64+-0x50] ; /* 0xffffb004061a7981 */ /* 0x002ea8000c1e1900 */ /*02d0*/ LDG.E R16, [R6.64+-0x4c] ; /* 0xffffb40406107981 */ /* 0x000ee8000c1e1900 */ /*02e0*/ LDG.E R14, [R6.64+-0x48] ; /* 0xffffb804060e7981 */ /* 0x000f28000c1e1900 */ /*02f0*/ LDG.E R13, [R6.64+-0x44] ; /* 0xffffbc04060d7981 */ /* 0x000f68000c1e1900 */ /*0300*/ LDG.E R12, [R6.64+-0x40] ; /* 0xffffc004060c7981 */ /* 0x000f68000c1e1900 */ /*0310*/ LDG.E R9, [R6.64+-0x3c] ; /* 0xffffc40406097981 */ /* 0x000f68000c1e1900 */ /*0320*/ LDG.E R8, [R6.64+-0x38] ; /* 0xffffc80406087981 */ /* 0x000f68000c1e1900 */ /*0330*/ LDG.E R10, [R6.64+-0x34] ; /* 0xffffcc04060a7981 */ /* 0x000f68000c1e1900 */ /*0340*/ LDG.E R11, [R6.64+-0x30] ; /* 0xffffd004060b7981 */ /* 0x000f68000c1e1900 */ /*0350*/ LDG.E R23, [R6.64+-0x2c] ; /* 0xffffd40406177981 */ /* 0x000f68000c1e1900 */ /*0360*/ LDG.E R19, [R6.64+-0x28] ; /* 0xffffd80406137981 */ /* 0x000f68000c1e1900 */ /*0370*/ LDS.U8 R17, [R0.X8+-0x10] ; /* 0xfffff00000117984 */ /* 0x000e280000008000 */ /*0380*/ LDG.E R20, [R6.64+-0x24] ; /* 0xffffdc0406147981 */ /* 0x000f68000c1e1900 */ /*0390*/ LDS.U8 R27, [R0.X8+-0xf] ; /* 0xfffff100001b7984 */ /* 0x000e680000008000 */ /*03a0*/ LDS.U8 R15, [R0.X8+-0xe] ; /* 0xfffff200000f7984 */ /* 0x000e680000008000 */ /*03b0*/ LDG.E R21, [R6.64+-0x20] ; /* 0xffffe00406157981 */ /* 0x000f68000c1e1900 */ /*03c0*/ LDS.U8 R24, [R0.X8+-0xd] ; /* 0xfffff30000187984 */ /* 0x000e680000008000 */ /*03d0*/ LDG.E R18, [R6.64+-0x1c] ; /* 0xffffe40406127981 */ /* 0x000f68000c1e1900 */ /*03e0*/ LDS.U8 R28, [R0.X8+-0x9] ; /* 0xfffff700001c7984 */ /* 0x000e620000008000 */ /*03f0*/ I2F.U16 R17, R17 ; /* 0x0000001100117306 */ /* 0x001eb00000101000 */ /*0400*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x002ef00000101000 */ /*0410*/ I2F.U16 R15, R15 ; /* 0x0000000f000f7306 */ /* 0x000f300000101000 */ /*0420*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000f700000101000 */ /*0430*/ I2F.U16 R28, R28 ; /* 
0x0000001c001c7306 */ /* 0x000fe20000101000 */ /*0440*/ FFMA R25, R17, R26, RZ ; /* 0x0000001a11197223 */ /* 0x004fc400000000ff */ /*0450*/ LDG.E R17, [R6.64+-0x14] ; /* 0xffffec0406117981 */ /* 0x000ea4000c1e1900 */ /*0460*/ FFMA R26, R27, R16, R25 ; /* 0x000000101b1a7223 */ /* 0x008fe40000000019 */ /*0470*/ LDS.U8 R25, [R0.X8+-0xc] ; /* 0xfffff40000197984 */ /* 0x000e280000008000 */ /*0480*/ LDG.E R16, [R6.64+-0x18] ; /* 0xffffe80406107981 */ /* 0x000ee2000c1e1900 */ /*0490*/ FFMA R14, R15, R14, R26 ; /* 0x0000000e0f0e7223 */ /* 0x010fc6000000001a */ /*04a0*/ LDS.U8 R26, [R0.X8+-0xb] ; /* 0xfffff500001a7984 */ /* 0x000e680000008000 */ /*04b0*/ LDS.U8 R27, [R0.X8+-0xa] ; /* 0xfffff600001b7984 */ /* 0x000f280000008000 */ /*04c0*/ LDG.E R15, [R6.64+-0x10] ; /* 0xfffff004060f7981 */ /* 0x000ea2000c1e1900 */ /*04d0*/ FFMA R13, R24, R13, R14 ; /* 0x0000000d180d7223 */ /* 0x020fc6000000000e */ /*04e0*/ LDG.E R14, [R6.64+-0xc] ; /* 0xfffff404060e7981 */ /* 0x000f68000c1e1900 */ /*04f0*/ LDS.U8 R24, [R0.X8+-0x8] ; /* 0xfffff80000187984 */ /* 0x000f220000008000 */ /*0500*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x001e300000101000 */ /*0510*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x002e620000101000 */ /*0520*/ FFMA R13, R25, R12, R13 ; /* 0x0000000c190d7223 */ /* 0x001fc4000000000d */ /*0530*/ LDG.E R12, [R6.64+-0x8] ; /* 0xfffff804060c7981 */ /* 0x000f64000c1e1900 */ /*0540*/ FFMA R9, R26, R9, R13 ; /* 0x000000091a097223 */ /* 0x002fe4000000000d */ /*0550*/ LDG.E R13, [R6.64+-0x4] ; /* 0xfffffc04060d7981 */ /* 0x000f62000c1e1900 */ /*0560*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x010e240000101000 */ /*0570*/ FFMA R25, R27, R8, R9 ; /* 0x000000081b197223 */ /* 0x001fe40000000009 */ /*0580*/ LDG.E R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000f28000c1e1900 */ /*0590*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000e220000101000 */ /*05a0*/ LDG.E R9, [R6.64+0x4] ; /* 0x0000040406097981 */ /* 0x000f22000c1e1900 */ /*05b0*/ FFMA R26, R28, R10, 
R25 ; /* 0x0000000a1c1a7223 */ /* 0x000fc60000000019 */ /*05c0*/ LDG.E R10, [R6.64+0x8] ; /* 0x00000804060a7981 */ /* 0x000f28000c1e1900 */ /*05d0*/ LDS.U8 R25, [R0.X8+-0x7] ; /* 0xfffff90000197984 */ /* 0x000e680000008000 */ /*05e0*/ LDS.U8 R27, [R0.X8+-0x6] ; /* 0xfffffa00001b7984 */ /* 0x000e620000008000 */ /*05f0*/ FFMA R26, R24, R11, R26 ; /* 0x0000000b181a7223 */ /* 0x001fc6000000001a */ /*0600*/ LDG.E R11, [R6.64+0xc] ; /* 0x00000c04060b7981 */ /* 0x000f28000c1e1900 */ /*0610*/ LDS.U8 R28, [R0.X8+-0x5] ; /* 0xfffffb00001c7984 */ /* 0x000e220000008000 */ /*0620*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e640000101000 */ /*0630*/ FFMA R26, R25, R23, R26 ; /* 0x00000017191a7223 */ /* 0x002fe4000000001a */ /*0640*/ LDS.U8 R23, [R0.X8+-0x4] ; /* 0xfffffc0000177984 */ /* 0x000e680000008000 */ /*0650*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x000e300000101000 */ /*0660*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x001e220000101000 */ /*0670*/ FFMA R29, R27, R19, R26 ; /* 0x000000131b1d7223 */ /* 0x000fc4000000001a */ /*0680*/ LDG.E R19, [R6.64+0x10] ; /* 0x0000100406137981 */ /* 0x000f24000c1e1900 */ /*0690*/ FFMA R20, R28, R20, R29 ; /* 0x000000141c147223 */ /* 0x001fe4000000001d */ /*06a0*/ LDS.U8 R29, [R0.X8+-0x2] ; /* 0xfffffe00001d7984 */ /* 0x000e220000008000 */ /*06b0*/ I2F.U16 R24, R23 ; /* 0x0000001700187306 */ /* 0x0022660000101000 */ /*06c0*/ LDS.U8 R23, [R0.X8+-0x3] ; /* 0xfffffd0000177984 */ /* 0x002e620000008000 */ /*06d0*/ FFMA R25, R24, R21, R20 ; /* 0x0000001518197223 */ /* 0x000fc60000000014 */ /*06e0*/ LDS.U8 R24, [R0.X8+-0x1] ; /* 0xffffff0000187984 */ /* 0x000e680000008000 */ /*06f0*/ LDG.E R20, [R6.64+0x14] ; /* 0x0000140406147981 */ /* 0x000f28000c1e1900 */ /*0700*/ LDS.U8 R21, [R0.X8] ; /* 0x0000000000157984 */ /* 0x000e620000008000 */ /*0710*/ I2F.U16 R27, R29 ; /* 0x0000001d001b7306 */ /* 0x0011e60000101000 */ /*0720*/ LDS.U8 R29, [R0.X8+0xb] ; /* 0x00000b00001d7984 */ /* 0x001fea0000008000 */ /*0730*/ I2F.U16 R26, 
R23 ; /* 0x00000017001a7306 */ /* 0x0020700000101000 */ /*0740*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x000ea20000101000 */ /*0750*/ LDS.U8 R23, [R0.X8+0x4] ; /* 0x0000040000177984 */ /* 0x001fe20000008000 */ /*0760*/ FFMA R26, R26, R18, R25 ; /* 0x000000121a1a7223 */ /* 0x002fc60000000019 */ /*0770*/ LDS.U8 R25, [R0.X8+0x1] ; /* 0x0000010000197984 */ /* 0x000e280000008000 */ /*0780*/ LDG.E R18, [R6.64+0x18] ; /* 0x0000180406127981 */ /* 0x000f22000c1e1900 */ /*0790*/ I2F.U16 R21, R21 ; /* 0x0000001500157306 */ /* 0x000e620000101000 */ /*07a0*/ FFMA R16, R27, R16, R26 ; /* 0x000000101b107223 */ /* 0x008fe4000000001a */ /*07b0*/ LDS.U8 R26, [R0.X8+0x2] ; /* 0x00000200001a7984 */ /* 0x000ee40000008000 */ /*07c0*/ FFMA R28, R24, R17, R16 ; /* 0x00000011181c7223 */ /* 0x004fc40000000010 */ /*07d0*/ LDS.U8 R27, [R0.X8+0x3] ; /* 0x00000300001b7984 */ /* 0x000ea80000008000 */ /*07e0*/ LDG.E R16, [R6.64+0x1c] ; /* 0x00001c0406107981 */ /* 0x000f22000c1e1900 */ /*07f0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x001f620000101000 */ /*0800*/ FFMA R17, R21, R15, R28 ; /* 0x0000000f15117223 */ /* 0x002fe4000000001c */ /*0810*/ LDG.E R15, [R6.64+0x20] ; /* 0x00002004060f7981 */ /* 0x000f28000c1e1900 */ /*0820*/ LDS.U8 R24, [R0.X8+0x5] ; /* 0x0000050000187984 */ /* 0x000e280000008000 */ /*0830*/ LDS.U8 R28, [R0.X8+0x7] ; /* 0x00000700001c7984 */ /* 0x000fe20000008000 */ /*0840*/ I2F.U16 R23, R23 ; /* 0x0000001700177306 */ /* 0x000fe60000101000 */ /*0850*/ LDG.E R21, [R6.64+0x3c] ; /* 0x00003c0406157981 */ /* 0x000f22000c1e1900 */ /*0860*/ FFMA R17, R25, R14, R17 ; /* 0x0000000e19117223 */ /* 0x020fc60000000011 */ /*0870*/ LDG.E R14, [R6.64+0x24] ; /* 0x00002404060e7981 */ /* 0x000f68000c1e1900 */ /*0880*/ LDS.U8 R25, [R0.X8+0x6] ; /* 0x0000060000197984 */ /* 0x000e620000008000 */ /*0890*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x008ef00000101000 */ /*08a0*/ I2F.U16 R27, R27 ; /* 0x0000001b001b7306 */ /* 0x004ea20000101000 */ /*08b0*/ FFMA R12, R26, R12, 
R17 ; /* 0x0000000c1a0c7223 */ /* 0x008fc40000000011 */ /*08c0*/ LDG.E R17, [R6.64+0x28] ; /* 0x0000280406117981 */ /* 0x000ee4000c1e1900 */ /*08d0*/ FFMA R13, R27, R13, R12 ; /* 0x0000000d1b0d7223 */ /* 0x004fe4000000000c */ /*08e0*/ LDG.E R12, [R6.64+0x2c] ; /* 0x00002c04060c7981 */ /* 0x000ea2000c1e1900 */ /*08f0*/ I2F.U16 R24, R24 ; /* 0x0000001800187306 */ /* 0x001e300000101000 */ /*0900*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e620000101000 */ /*0910*/ FFMA R26, R23, R8, R13 ; /* 0x00000008171a7223 */ /* 0x010fc4000000000d */ /*0920*/ LDG.E R8, [R6.64+0x30] ; /* 0x0000300406087981 */ /* 0x000f28000c1e1900 */ /*0930*/ LDG.E R13, [R6.64+0x34] ; /* 0x00003404060d7981 */ /* 0x000f22000c1e1900 */ /*0940*/ FFMA R27, R24, R9, R26 ; /* 0x00000009181b7223 */ /* 0x001fc6000000001a */ /*0950*/ LDG.E R9, [R6.64+0x38] ; /* 0x0000380406097981 */ /* 0x000f22000c1e1900 */ /*0960*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x000e260000101000 */ /*0970*/ LDG.E R23, [R6.64+0x40] ; /* 0x0000400406177981 */ /* 0x000f22000c1e1900 */ /*0980*/ FFMA R26, R25, R10, R27 ; /* 0x0000000a191a7223 */ /* 0x002fc6000000001b */ /*0990*/ LDG.E R10, [R6.64+0x44] ; /* 0x00004404060a7981 */ /* 0x000f28000c1e1900 */ /*09a0*/ LDG.E R24, [R6.64+0x48] ; /* 0x0000480406187981 */ /* 0x000f28000c1e1900 */ /*09b0*/ LDS.U8 R25, [R0.X8+0x8] ; /* 0x0000080000197984 */ /* 0x000e620000008000 */ /*09c0*/ FFMA R26, R28, R11, R26 ; /* 0x0000000b1c1a7223 */ /* 0x001fc6000000001a */ /*09d0*/ LDG.E R11, [R6.64+0x4c] ; /* 0x00004c04060b7981 */ /* 0x000f28000c1e1900 */ /*09e0*/ LDS.U8 R28, [R0.X8+0xa] ; /* 0x00000a00001c7984 */ /* 0x000e220000008000 */ /*09f0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x002e640000101000 */ /*0a00*/ FFMA R19, R25, R19, R26 ; /* 0x0000001319137223 */ /* 0x002fe4000000001a */ /*0a10*/ LDS.U8 R26, [R0.X8+0x9] ; /* 0x00000900001a7984 */ /* 0x000e680000008000 */ /*0a20*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x001fe20000101000 */ /*0a30*/ LDS.U8 R25, 
[R0.X8+0x10] ; /* 0x0000100000197984 */ /* 0x000fee0000008000 */ /*0a40*/ I2F.U16 R29, R29 ; /* 0x0000001d001d7306 */ /* 0x000ff00000101000 */ /*0a50*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x002e240000101000 */ /*0a60*/ FFMA R27, R26, R20, R19 ; /* 0x000000141a1b7223 */ /* 0x001fc40000000013 */ /*0a70*/ LDS.U8 R19, [R0.X8+0xc] ; /* 0x00000c0000137984 */ /* 0x000e280000008000 */ /*0a80*/ LDS.U8 R20, [R0.X8+0xe] ; /* 0x00000e0000147984 */ /* 0x000e620000008000 */ /*0a90*/ FFMA R27, R28, R18, R27 ; /* 0x000000121c1b7223 */ /* 0x000fc6000000001b */ /*0aa0*/ LDS.U8 R18, [R0.X8+0xd] ; /* 0x00000d0000127984 */ /* 0x000f680000008000 */ /*0ab0*/ LDS.U8 R28, [R0.X8+0x13] ; /* 0x00001300001c7984 */ /* 0x000fe20000008000 */ /*0ac0*/ I2F.U16 R26, R19 ; /* 0x00000013001a7306 */ /* 0x001e300000101000 */ /*0ad0*/ I2F.U16 R20, R20 ; /* 0x0000001400147306 */ /* 0x002fe20000101000 */ /*0ae0*/ FFMA R27, R29, R16, R27 ; /* 0x000000101d1b7223 */ /* 0x000fc4000000001b */ /*0af0*/ LDS.U8 R16, [R0.X8+0xf] ; /* 0x00000f0000107984 */ /* 0x000e6a0000008000 */ /*0b00*/ I2F.U16 R18, R18 ; /* 0x0000001200127306 */ /* 0x020f620000101000 */ /*0b10*/ FFMA R15, R26, R15, R27 ; /* 0x0000000f1a0f7223 */ /* 0x001fe4000000001b */ /*0b20*/ LDS.U8 R26, [R0.X8+0x11] ; /* 0x00001100001a7984 */ /* 0x000e280000008000 */ /*0b30*/ LDS.U8 R27, [R0.X8+0x12] ; /* 0x00001200001b7984 */ /* 0x000e220000008000 */ /*0b40*/ FFMA R14, R18, R14, R15 ; /* 0x0000000e120e7223 */ /* 0x020fc6000000000f */ /*0b50*/ LDS.U8 R15, [R0.X8+0x15] ; /* 0x00001500000f7984 */ /* 0x000fe20000008000 */ /*0b60*/ I2F.U16 R16, R16 ; /* 0x0000001000107306 */ /* 0x002ea20000101000 */ /*0b70*/ FFMA R29, R20, R17, R14 ; /* 0x00000011141d7223 */ /* 0x008fe4000000000e */ /*0b80*/ LDS.U8 R17, [R0.X8+0x14] ; /* 0x0000140000117984 */ /* 0x000e680000008000 */ /*0b90*/ LDS.U8 R14, [R0.X8+0x16] ; /* 0x00001600000e7984 */ /* 0x000ee20000008000 */ /*0ba0*/ FFMA R19, R16, R12, R29 ; /* 0x0000000c10137223 */ /* 0x004fc6000000001d */ /*0bb0*/ LDS.U8 
R12, [R0.X8+0x17] ; /* 0x00001700000c7984 */ /* 0x000ea20000008000 */ /*0bc0*/ I2F.U16 R25, R25 ; /* 0x0000001900197306 */ /* 0x000f300000101000 */ /*0bd0*/ I2F.U16 R26, R26 ; /* 0x0000001a001a7306 */ /* 0x001e300000101000 */ /*0be0*/ I2F.U16 R18, R27 ; /* 0x0000001b00127306 */ /* 0x000f620000101000 */ /*0bf0*/ FFMA R8, R25, R8, R19 ; /* 0x0000000819087223 */ /* 0x010fce0000000013 */ /*0c00*/ I2F.U16 R28, R28 ; /* 0x0000001c001c7306 */ /* 0x000f220000101000 */ /*0c10*/ FFMA R8, R26, R13, R8 ; /* 0x0000000d1a087223 */ /* 0x001fce0000000008 */ /*0c20*/ I2F.U16 R16, R17 ; /* 0x0000001100107306 */ /* 0x002e220000101000 */ /*0c30*/ FFMA R8, R18, R9, R8 ; /* 0x0000000912087223 */ /* 0x020fce0000000008 */ /*0c40*/ I2F.U16 R15, R15 ; /* 0x0000000f000f7306 */ /* 0x000e620000101000 */ /*0c50*/ FFMA R8, R28, R21, R8 ; /* 0x000000151c087223 */ /* 0x010fce0000000008 */ /*0c60*/ I2F.U16 R14, R14 ; /* 0x0000000e000e7306 */ /* 0x008ee20000101000 */ /*0c70*/ FFMA R23, R16, R23, R8 ; /* 0x0000001710177223 */ /* 0x001fce0000000008 */ /*0c80*/ I2F.U16 R12, R12 ; /* 0x0000000c000c7306 */ /* 0x004e220000101000 */ /*0c90*/ FFMA R23, R15, R10, R23 ; /* 0x0000000a0f177223 */ /* 0x002fc80000000017 */ /*0ca0*/ FFMA R24, R14, R24, R23 ; /* 0x000000180e187223 */ /* 0x008fc80000000017 */ /*0cb0*/ FFMA R11, R12, R11, R24 ; /* 0x0000000b0c0b7223 */ /* 0x001fca0000000018 */ /*0cc0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R11 ; /* 0x0000000b0200798e */ /* 0x0001e4000c10e784 */ /*0cd0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x002fea0003800000 */ /*0ce0*/ IADD3 R22, R22, 0x1, RZ ; /* 0x0000000116167810 */ /* 0x000fe20007ffe0ff */ /*0cf0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0d00*/ IADD3 R4, P1, R4, 0x2000, RZ ; /* 0x0000200004047810 */ /* 0x000fe40007f3e0ff */ /*0d10*/ ISETP.NE.AND P3, PT, R22, 0x5, PT ; /* 0x000000051600780c */ /* 0x000fe40003f65270 */ /*0d20*/ IADD3 R6, P2, R6, 0xa0, RZ ; /* 0x000000a006067810 */ /* 0x000fe40007f5e0ff */ /*0d30*/ 
IADD3.X R5, RZ, R5, RZ, P1, !PT ; /* 0x00000005ff057210 */ /* 0x000fe40000ffe4ff */ /*0d40*/ IADD3.X R23, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff177210 */ /* 0x000fce00017fe4ff */ /*0d50*/ @P3 BRA 0x180 ; /* 0xfffff42000003947 */ /* 0x000fea000383ffff */ /*0d60*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0d70*/ BRA 0xd70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0d80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0da0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0db0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0dc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0dd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0de0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0df0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4convPhPfS0_ .globl _Z4convPhPfS0_ .p2align 8 .type _Z4convPhPfS0_,@function _Z4convPhPfS0_: s_add_i32 s2, s15, 0xfffffc02 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lt_u32 s2, 0xfffffc04 s_cbranch_scc1 .LBB0_12 s_clause 0x1 s_load_b64 s[6:7], s[0:1], 0x10 s_load_b128 s[0:3], s[0:1], 0x0 v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, 0xfffffc02, v0 s_add_i32 s8, s15, -2 v_dual_mov_b32 v8, 0 :: v_dual_add_nc_u32 v1, -2, v0 s_mul_i32 s10, s14, 0x3f8040 s_mul_hi_i32 s9, s14, 0x3f8040 s_mul_hi_u32 s12, s8, 0xff0 s_mulk_i32 s8, 0xff0 s_mov_b32 s4, s15 s_mov_b32 s5, 0 v_lshlrev_b32_e32 v6, 3, v0 v_lshlrev_b64 v[0:1], 2, v[1:2] v_cmp_lt_u32_e32 vcc_lo, 0xfffffc03, v3 s_mul_i32 s11, s14, 0x320 s_delay_alu instid0(VALU_DEP_3) v_add_nc_u32_e32 v7, -16, v6 s_waitcnt lgkmcnt(0) s_add_u32 s6, s6, s10 s_addc_u32 s7, s7, s9 s_add_u32 s8, s6, s8 s_addc_u32 s9, s7, s12 s_lshl_b64 s[6:7], s[4:5], 13 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_add_u32 s0, s0, s6 s_addc_u32 s1, s1, s7 v_add_co_u32 v2, s0, s0, v6 v_add_co_ci_u32_e64 v3, null, s1, 0, s0 v_add_co_u32 v0, s0, s8, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_ci_u32_e64 v1, s0, s9, v1, s0 v_add_co_u32 v2, s0, v2, 0xffffc000 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v3, s0, -1, v3, s0 s_mul_hi_i32 s0, s14, 0x320 s_add_u32 s2, s2, s11 s_addc_u32 s3, s3, s0 s_branch .LBB0_3 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 v_add_co_u32 v2, s0, v2, 0x2000 s_add_i32 s5, s5, 1 v_add_co_ci_u32_e64 v3, s0, 0, v3, s0 s_add_u32 s2, s2, 0xa0 s_addc_u32 s3, s3, 0 s_cmp_lg_u32 s5, 5 s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB0_12 .LBB0_3: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2 s_mov_b32 s1, 0 .LBB0_4: global_load_u8 v9, v[4:5], off v_add_co_u32 v4, s0, v4, 1 v_add_nc_u32_e32 v10, s1, v6 v_add_co_ci_u32_e64 v5, s0, 0, v5, s0 s_add_i32 s1, s1, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s1, 8 
s_waitcnt vmcnt(0) ds_store_b8 v10, v9 s_cbranch_scc0 .LBB0_4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_and_saveexec_b32 s4, vcc_lo s_cbranch_execz .LBB0_2 v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v4, v7 s_mov_b32 s8, 0 s_mov_b64 s[0:1], s[2:3] .p2align 6 .LBB0_7: s_delay_alu instid0(SALU_CYCLE_1) s_mov_b64 s[6:7], s[0:1] s_mov_b32 s9, 0 .LBB0_8: global_load_b32 v5, v8, s[6:7] v_add_nc_u32_e32 v10, s9, v4 s_add_i32 s9, s9, 1 s_add_u32 s6, s6, 4 s_addc_u32 s7, s7, 0 s_cmp_eq_u32 s9, 8 ds_load_u8 v10, v10 s_waitcnt lgkmcnt(0) v_cvt_f32_ubyte0_e32 v10, v10 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_fmac_f32_e32 v9, v5, v10 s_cbranch_scc0 .LBB0_8 s_add_i32 s8, s8, 1 v_add_nc_u32_e32 v4, 8, v4 s_add_u32 s0, s0, 32 s_addc_u32 s1, s1, 0 s_cmp_eq_u32 s8, 5 s_cbranch_scc0 .LBB0_7 global_load_b32 v5, v[0:1], off s_mov_b32 s1, 0 .LBB0_11: s_waitcnt vmcnt(0) v_add_f32_e32 v4, v5, v9 global_atomic_cmpswap_b32 v4, v[0:1], v[4:5], off glc s_waitcnt vmcnt(0) v_cmp_eq_u32_e64 s0, v4, v5 v_mov_b32_e32 v5, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 s1, s0, s1 s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execnz .LBB0_11 s_branch .LBB0_2 .LBB0_12: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4convPhPfS0_ .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4convPhPfS0_, .Lfunc_end0-_Z4convPhPfS0_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4convPhPfS0_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4convPhPfS0_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00195afa_00000000-6_new.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10fillMatrixPh .type _Z10fillMatrixPh, @function _Z10fillMatrixPh: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 movq %rdi, %rbp movl $0, %r12d movl $0, %ebx jmp .L4 .L11: addl $1, %r8d addl %r11d, %r9d addq $8, %rsi cmpl $1024, %r8d je .L6 .L8: leal (%r10,%r8), %edi leaq -8(%rsi), %rcx movl %r9d, %edx .L5: movslq %edx, %rax imulq $-2139062143, %rax, %rax shrq $32, %rax addl %edx, %eax sarl $7, %eax movl %edx, %r13d sarl $31, %r13d subl %r13d, %eax movl %eax, %r13d sall $8, %r13d subl %eax, %r13d movl %edx, %eax subl %r13d, %eax movb %al, (%rcx) addl %edi, %edx addq $1, %rcx cmpq %rsi, %rcx jne .L5 jmp .L11 .L6: addl $1, %ebx addl $2, %r12d addq $8192, %rbp cmpl $1024, %ebx je .L3 .L4: leal 3(%rbx), %r11d leaq 8(%rbp), %rsi movl %r12d, %r9d movl $0, %r8d leal 4(%rbx), %r10d jmp .L8 .L3: popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z10fillMatrixPh, .-_Z10fillMatrixPh .globl _Z10fillKernelPf .type _Z10fillKernelPf, @function _Z10fillKernelPf: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp 
.cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %rdi, 40(%rsp) movl $-1, %r15d jmp .L13 .L25: movsd .LC2(%rip), %xmm1 movsd 8(%rsp), %xmm0 call fmod@PLT jmp .L15 .L18: subl $1, %r15d addq $800, 40(%rsp) cmpl $-129, %r15d je .L12 .L13: pxor %xmm0, %xmm0 cvtsi2sdl %r15d, %xmm0 mulsd .LC5(%rip), %xmm0 movsd %xmm0, 32(%rsp) movq 40(%rsp), %r14 movl $0, %r13d .L21: addl $1, %r13d pxor %xmm0, %xmm0 cvtsi2sdl %r13d, %xmm0 mulsd .LC4(%rip), %xmm0 addsd 32(%rsp), %xmm0 movsd %xmm0, 24(%rsp) movq %r14, %rbp movl $0, %r12d .L19: addl $1, %r12d pxor %xmm0, %xmm0 cvtsi2sdl %r12d, %xmm0 mulsd .LC3(%rip), %xmm0 movsd 24(%rsp), %xmm4 subsd %xmm0, %xmm4 movsd %xmm4, 16(%rsp) movl $1, %ebx .L16: pxor %xmm0, %xmm0 cvtsi2sdl %ebx, %xmm0 mulsd .LC0(%rip), %xmm0 addsd 16(%rsp), %xmm0 movsd %xmm0, 8(%rsp) fld1 fldl 8(%rsp) .L14: fprem fnstsw %ax testb $4, %ah jne .L14 fstp %st(1) fstpl (%rsp) movsd (%rsp), %xmm2 ucomisd %xmm2, %xmm2 jp .L25 .L15: pxor %xmm0, %xmm0 cvtsd2ss (%rsp), %xmm0 movss %xmm0, -4(%rbp,%rbx,4) addq $1, %rbx cmpq $9, %rbx jne .L16 addq $32, %rbp cmpl $5, %r12d jne .L19 addq $160, %r14 cmpl $5, %r13d jne .L21 jmp .L18 .L12: addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z10fillKernelPf, .-_Z10fillKernelPf .section .rodata.str1.1,"aMS",@progbits,1 .LC7: .string "w" .LC8: .string "assignment4_out" .LC9: .string "%4f " .LC10: .string "\n" .text .globl _Z20print_matrix_to_filePf .type _Z20print_matrix_to_filePf, @function _Z20print_matrix_to_filePf: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 
.cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r15 leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %r15, %r14 addq $532684800, %r15 leaq .LC9(%rip), %r13 .L27: leaq 4080(%r14), %rbp .L31: leaq -4080(%rbp), %rbx .L28: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r13, %rdx movl $2, %esi movq %r12, %rdi movl $1, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L28 addq $4080, %rbp leaq 4165680(%r14), %rax cmpq %rax, %rbp jne .L31 leaq .LC10(%rip), %rdx movl $2, %esi movq %r12, %rdi movl $0, %eax call __fprintf_chk@PLT addq $4161600, %r14 cmpq %r15, %r14 jne .L27 movq %r12, %rdi call fclose@PLT addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z20print_matrix_to_filePf, .-_Z20print_matrix_to_filePf .globl _Z28__device_stub__Z4convPhPfS0_PhPfS0_ .type _Z28__device_stub__Z4convPhPfS0_PhPfS0_, @function _Z28__device_stub__Z4convPhPfS0_PhPfS0_: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L38 .L34: movq 120(%rsp), %rax subq %fs:40, %rax jne .L39 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 
152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4convPhPfS0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L34 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z28__device_stub__Z4convPhPfS0_PhPfS0_, .-_Z28__device_stub__Z4convPhPfS0_PhPfS0_ .globl _Z4convPhPfS0_ .type _Z4convPhPfS0_, @function _Z4convPhPfS0_: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4convPhPfS0_PhPfS0_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z4convPhPfS0_, .-_Z4convPhPfS0_ .section .rodata.str1.1 .LC12: .string "%f\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $80, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $8388608, %edi call malloc@PLT movq %rax, %r12 movl $102400, %edi call malloc@PLT movq %rax, %rbp movl $532684800, %edi call malloc@PLT movq %rax, %rbx movq %r12, %rdi call _Z10fillMatrixPh movq %rbp, %rdi call _Z10fillKernelPf leaq 8(%rsp), %rdi movl $8388608, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $102400, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $532684800, %esi call cudaMalloc@PLT movl $1, %ecx movl $8388608, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $102400, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 4(%rsp) movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movl $1024, 60(%rsp) movl $1, 64(%rsp) movl $128, 48(%rsp) movl $1024, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi 
call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L43: call cudaDeviceSynchronize@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movq 40(%rsp), %rdi call cudaEventSynchronize@PLT leaq 4(%rsp), %rdi movq 40(%rsp), %rdx movq 32(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 4(%rsp), %xmm0 leaq .LC12(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $2, %ecx movl $532684800, %edx movq 24(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq %rbx, %rdi call _Z20print_matrix_to_filePf movq 72(%rsp), %rax subq %fs:40, %rax jne .L47 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L46: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z4convPhPfS0_PhPfS0_ jmp .L43 .L47: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC13: .string "_Z4convPhPfS0_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC13(%rip), %rdx movq %rdx, %rcx leaq _Z4convPhPfS0_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section 
.nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 1717986918 .long 1075603046 .align 8 .LC2: .long 0 .long 1072693248 .align 8 .LC3: .long 858993459 .long 1075000115 .align 8 .LC4: .long -1717986918 .long 1074370969 .align 8 .LC5: .long -858993459 .long 1073794252 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "new.hip" .globl _Z10fillMatrixPh # -- Begin function _Z10fillMatrixPh .p2align 4, 0x90 .type _Z10fillMatrixPh,@function _Z10fillMatrixPh: # @_Z10fillMatrixPh .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax movl $3, %ecx movl $4, %edx movl $2155905153, %esi # imm = 0x80808081 xorl %r8d, %r8d .p2align 4, 0x90 .LBB0_1: # %.preheader23 # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 # Child Loop BB0_3 Depth 3 movl %edx, %r9d movl %eax, %r10d movq %rdi, %r11 xorl %ebx, %ebx .p2align 4, 0x90 .LBB0_2: # %.preheader # Parent Loop BB0_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_3 Depth 3 movl %r10d, %ebp xorl %r14d, %r14d .p2align 4, 0x90 .LBB0_3: # Parent Loop BB0_1 Depth=1 # Parent Loop BB0_2 Depth=2 # => This Inner Loop Header: Depth=3 movl %ebp, %r15d imulq %rsi, %r15 shrq $39, %r15 addb %bpl, %r15b movb %r15b, (%r11,%r14) incq %r14 addl %r9d, %ebp cmpq $8, %r14 jne .LBB0_3 # %bb.4: # in Loop: Header=BB0_2 Depth=2 incq %rbx addq $8, %r11 addl %ecx, %r10d incl %r9d cmpq $1024, %rbx # imm = 0x400 jne .LBB0_2 # %bb.5: # in Loop: Header=BB0_1 Depth=1 incq %r8 addq $8192, %rdi # imm = 0x2000 addl $2, %eax incl %ecx incl %edx cmpq $1024, %r8 # imm = 0x400 jne .LBB0_1 # %bb.6: popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10fillMatrixPh, .Lfunc_end0-_Z10fillMatrixPh .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10fillKernelPf .LCPI1_0: .quad 0x4000cccccccccccd # double 2.1000000000000001 .LCPI1_1: .quad 0x400999999999999a # double 3.2000000000000002 .LCPI1_2: .quad 0xc013333333333333 # double -4.7999999999999998 .LCPI1_3: .quad 0x401c666666666666 # double 
7.0999999999999996 .LCPI1_4: .quad 0x3ff0000000000000 # double 1 .text .globl _Z10fillKernelPf .p2align 4, 0x90 .type _Z10fillKernelPf,@function _Z10fillKernelPf: # @_Z10fillKernelPf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero movsd .LCPI1_4(%rip), %xmm2 # xmm2 = mem[0],zero .p2align 4, 0x90 .LBB1_1: # %.preheader21 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 # Child Loop BB1_3 Depth 3 # Child Loop BB1_4 Depth 4 movq %rax, (%rsp) # 8-byte Spill # kill: def $eax killed $eax killed $rax notl %eax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI1_0(%rip), %xmm0 movsd %xmm0, 16(%rsp) # 8-byte Spill movq %rdi, 8(%rsp) # 8-byte Spill movq %rdi, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_2: # %.preheader20 # Parent Loop BB1_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_3 Depth 3 # Child Loop BB1_4 Depth 4 incq %r12 xorps %xmm0, %xmm0 cvtsi2sd %r12d, %xmm0 mulsd .LCPI1_1(%rip), %xmm0 addsd 16(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 24(%rsp) # 8-byte Spill movq %r15, %r13 xorl %ebp, %ebp .p2align 4, 0x90 .LBB1_3: # %.preheader # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB1_4 Depth 4 incq %rbp xorps %xmm0, %xmm0 cvtsi2sd %ebp, %xmm0 mulsd .LCPI1_2(%rip), %xmm0 addsd 24(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 32(%rsp) # 8-byte Spill xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_1 Depth=1 # Parent Loop BB1_2 Depth=2 # Parent Loop BB1_3 Depth=3 # => This Inner Loop Header: Depth=4 leaq 1(%rbx), %r14 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd 
%xmm1, %xmm0 addsd 32(%rsp), %xmm0 # 8-byte Folded Reload movaps %xmm2, %xmm1 callq fmod movsd .LCPI1_4(%rip), %xmm2 # xmm2 = mem[0],zero movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%r13,%rbx,4) movq %r14, %rbx cmpq $8, %r14 jne .LBB1_4 # %bb.5: # in Loop: Header=BB1_3 Depth=3 addq $32, %r13 cmpq $5, %rbp jne .LBB1_3 # %bb.6: # in Loop: Header=BB1_2 Depth=2 addq $160, %r15 cmpq $5, %r12 jne .LBB1_2 # %bb.7: # in Loop: Header=BB1_1 Depth=1 movq (%rsp), %rax # 8-byte Reload incq %rax movq 8(%rsp), %rdi # 8-byte Reload addq $800, %rdi # imm = 0x320 cmpq $128, %rax jne .LBB1_1 # %bb.8: addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z10fillKernelPf, .Lfunc_end1-_Z10fillKernelPf .cfi_endproc # -- End function .globl _Z20print_matrix_to_filePf # -- Begin function _Z20print_matrix_to_filePf .p2align 4, 0x90 .type _Z20print_matrix_to_filePf,@function _Z20print_matrix_to_filePf: # @_Z20print_matrix_to_filePf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdi, %rbx movl $.L.str, %edi movl $.L.str.1, %esi callq fopen movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_1: # %.preheader16 # =>This Loop Header: Depth=1 # Child Loop BB2_2 Depth 2 # Child Loop BB2_3 Depth 3 movq %rbx, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB2_2: # %.preheader # Parent Loop BB2_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_3 Depth 3 xorl %ebp, %ebp .p2align 4, 0x90 .LBB2_3: # Parent 
Loop BB2_1 Depth=1 # Parent Loop BB2_2 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r12,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.2, %esi movq %r14, %rdi movb $1, %al callq fprintf incq %rbp cmpq $1020, %rbp # imm = 0x3FC jne .LBB2_3 # %bb.4: # in Loop: Header=BB2_2 Depth=2 incq %r13 addq $4080, %r12 # imm = 0xFF0 cmpq $1020, %r13 # imm = 0x3FC jne .LBB2_2 # %bb.5: # in Loop: Header=BB2_1 Depth=1 movl $10, %edi movq %r14, %rsi callq fputc@PLT incq %r15 addq $4161600, %rbx # imm = 0x3F8040 cmpq $128, %r15 jne .LBB2_1 # %bb.6: movq %r14, %rdi addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fclose # TAILCALL .Lfunc_end2: .size _Z20print_matrix_to_filePf, .Lfunc_end2-_Z20print_matrix_to_filePf .cfi_endproc # -- End function .globl _Z19__device_stub__convPhPfS0_ # -- Begin function _Z19__device_stub__convPhPfS0_ .p2align 4, 0x90 .type _Z19__device_stub__convPhPfS0_,@function _Z19__device_stub__convPhPfS0_: # @_Z19__device_stub__convPhPfS0_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4convPhPfS0_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z19__device_stub__convPhPfS0_, .Lfunc_end3-_Z19__device_stub__convPhPfS0_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # 
-- Begin function main .LCPI4_0: .quad 0x4000cccccccccccd # double 2.1000000000000001 .LCPI4_1: .quad 0x400999999999999a # double 3.2000000000000002 .LCPI4_2: .quad 0xc013333333333333 # double -4.7999999999999998 .LCPI4_3: .quad 0x401c666666666666 # double 7.0999999999999996 .LCPI4_4: .quad 0x3ff0000000000000 # double 1 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $8388608, %edi # imm = 0x800000 callq malloc movq %rax, %rbx movl $102400, %edi # imm = 0x19000 callq malloc movq %rax, %rbp movl $532684800, %edi # imm = 0x1FC02000 callq malloc xorl %r13d, %r13d movl $3, %ecx movl $4, %edx movl $2155905153, %esi # imm = 0x80808081 movq %rbx, 56(%rsp) # 8-byte Spill movq %rbx, %rdi xorl %r8d, %r8d .p2align 4, 0x90 .LBB4_1: # %.preheader23.i # =>This Loop Header: Depth=1 # Child Loop BB4_2 Depth 2 # Child Loop BB4_3 Depth 3 movl %edx, %r9d movl %r13d, %r10d movq %rdi, %r11 xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_2: # %.preheader.i # Parent Loop BB4_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_3 Depth 3 movl %r10d, %ebx xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_3: # Parent Loop BB4_1 Depth=1 # Parent Loop BB4_2 Depth=2 # => This Inner Loop Header: Depth=3 movl %ebx, %r12d imulq %rsi, %r12 shrq $39, %r12 addb %bl, %r12b movb %r12b, (%r11,%r15) incq %r15 addl %r9d, %ebx cmpq $8, %r15 jne .LBB4_3 # %bb.4: # in Loop: Header=BB4_2 Depth=2 incq %r14 addq $8, %r11 addl %ecx, %r10d incl %r9d cmpq $1024, %r14 # imm = 0x400 jne .LBB4_2 # %bb.5: # in Loop: Header=BB4_1 Depth=1 incq %r8 addq $8192, %rdi # imm = 0x2000 addl $2, %r13d incl %ecx incl 
%edx cmpq $1024, %r8 # imm = 0x400 jne .LBB4_1 # %bb.6: # %.preheader21.i.preheader movq %rax, 72(%rsp) # 8-byte Spill xorl %eax, %eax movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero movq %rbp, 64(%rsp) # 8-byte Spill movq %rbp, %r13 .p2align 4, 0x90 .LBB4_7: # %.preheader21.i # =>This Loop Header: Depth=1 # Child Loop BB4_8 Depth 2 # Child Loop BB4_9 Depth 3 # Child Loop BB4_10 Depth 4 movq %rax, 88(%rsp) # 8-byte Spill # kill: def $eax killed $eax killed $rax notl %eax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 mulsd .LCPI4_0(%rip), %xmm0 movsd %xmm0, 96(%rsp) # 8-byte Spill movq %r13, 80(%rsp) # 8-byte Spill xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_8: # %.preheader20.i # Parent Loop BB4_7 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_9 Depth 3 # Child Loop BB4_10 Depth 4 incq %r14 xorps %xmm0, %xmm0 cvtsi2sd %r14d, %xmm0 mulsd .LCPI4_1(%rip), %xmm0 addsd 96(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 104(%rsp) # 8-byte Spill movq %r13, %rbp xorl %ebx, %ebx .p2align 4, 0x90 .LBB4_9: # %.preheader.i11 # Parent Loop BB4_7 Depth=1 # Parent Loop BB4_8 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB4_10 Depth 4 incq %rbx xorps %xmm0, %xmm0 cvtsi2sd %ebx, %xmm0 mulsd .LCPI4_2(%rip), %xmm0 addsd 104(%rsp), %xmm0 # 8-byte Folded Reload movsd %xmm0, 112(%rsp) # 8-byte Spill xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_10: # Parent Loop BB4_7 Depth=1 # Parent Loop BB4_8 Depth=2 # Parent Loop BB4_9 Depth=3 # => This Inner Loop Header: Depth=4 leaq 1(%r15), %r12 xorps %xmm0, %xmm0 cvtsi2sd %r12d, %xmm0 mulsd %xmm1, %xmm0 addsd 112(%rsp), %xmm0 # 8-byte Folded Reload movaps %xmm2, %xmm1 callq fmod movsd .LCPI4_4(%rip), %xmm2 # xmm2 = mem[0],zero movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rbp,%r15,4) movq %r12, %r15 cmpq $8, %r12 jne .LBB4_10 # %bb.11: # in Loop: Header=BB4_9 Depth=3 addq $32, %rbp cmpq $5, %rbx jne .LBB4_9 # %bb.12: # in Loop: Header=BB4_8 Depth=2 addq 
$160, %r13 cmpq $5, %r14 jne .LBB4_8 # %bb.13: # in Loop: Header=BB4_7 Depth=1 movq 88(%rsp), %rax # 8-byte Reload incq %rax movq 80(%rsp), %r13 # 8-byte Reload addq $800, %r13 # imm = 0x320 cmpq $128, %rax jne .LBB4_7 # %bb.14: # %_Z10fillKernelPf.exit leaq 48(%rsp), %rdi movl $8388608, %esi # imm = 0x800000 callq hipMalloc leaq 40(%rsp), %rdi movl $102400, %esi # imm = 0x19000 callq hipMalloc leaq 32(%rsp), %rdi movl $532684800, %esi # imm = 0x1FC02000 callq hipMalloc movq 48(%rsp), %rdi movl $8388608, %edx # imm = 0x800000 movq 56(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movl $102400, %edx # imm = 0x19000 movq 64(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy leaq 24(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movl $0, 12(%rsp) movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4398046511232, %rdi # imm = 0x40000000080 movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_16 # %bb.15: movq 48(%rsp), %rax movq 40(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 184(%rsp) movq %rcx, 176(%rsp) movq %rdx, 168(%rsp) leaq 184(%rsp), %rax movq %rax, 192(%rsp) leaq 176(%rsp), %rax movq %rax, 200(%rsp) leaq 168(%rsp), %rax movq %rax, 208(%rsp) leaq 152(%rsp), %rdi leaq 136(%rsp), %rsi leaq 128(%rsp), %rdx leaq 120(%rsp), %rcx callq __hipPopCallConfiguration movq 152(%rsp), %rsi movl 160(%rsp), %edx movq 136(%rsp), %rcx movl 144(%rsp), %r8d leaq 192(%rsp), %r9 movl $_Z4convPhPfS0_, %edi pushq 120(%rsp) .cfi_adjust_cfa_offset 8 pushq 136(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_16: callq hipDeviceSynchronize movq 16(%rsp), %rdi xorl %r15d, %r15d xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 24(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movss 
12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.4, %edi movb $1, %al callq printf movq 32(%rsp), %rsi movl $532684800, %edx # imm = 0x1FC02000 movq 72(%rsp), %rbp # 8-byte Reload movq %rbp, %rdi movl $2, %ecx callq hipMemcpy movl $.L.str, %edi movl $.L.str.1, %esi callq fopen movq %rax, %r14 .p2align 4, 0x90 .LBB4_17: # %.preheader16.i # =>This Loop Header: Depth=1 # Child Loop BB4_18 Depth 2 # Child Loop BB4_19 Depth 3 movq %rbp, %rbx xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_18: # %.preheader.i17 # Parent Loop BB4_17 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_19 Depth 3 xorl %r13d, %r13d .p2align 4, 0x90 .LBB4_19: # Parent Loop BB4_17 Depth=1 # Parent Loop BB4_18 Depth=2 # => This Inner Loop Header: Depth=3 movss (%rbx,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.2, %esi movq %r14, %rdi movb $1, %al callq fprintf incq %r13 cmpq $1020, %r13 # imm = 0x3FC jne .LBB4_19 # %bb.20: # in Loop: Header=BB4_18 Depth=2 incq %r12 addq $4080, %rbx # imm = 0xFF0 cmpq $1020, %r12 # imm = 0x3FC jne .LBB4_18 # %bb.21: # in Loop: Header=BB4_17 Depth=1 movl $10, %edi movq %r14, %rsi callq fputc@PLT incq %r15 addq $4161600, %rbp # imm = 0x3F8040 cmpq $128, %r15 jne .LBB4_17 # %bb.22: # %_Z20print_matrix_to_filePf.exit movq %r14, %rdi callq fclose xorl %eax, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size main, .Lfunc_end4-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB5_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, 
__hip_gpubin_handle(%rip) .LBB5_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4convPhPfS0_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end5: .size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB6_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB6_2: retq .Lfunc_end6: .size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "assignment4_out" .size .L.str, 16 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "w" .size .L.str.1, 2 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%4f " .size .L.str.2, 5 .type _Z4convPhPfS0_,@object # @_Z4convPhPfS0_ .section .rodata,"a",@progbits .globl _Z4convPhPfS0_ .p2align 3, 0x0 _Z4convPhPfS0_: .quad _Z19__device_stub__convPhPfS0_ .size _Z4convPhPfS0_, 8 .type .L.str.4,@object # @.str.4 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.4: .asciz "%f\n" .size .L.str.4, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4convPhPfS0_" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section 
.init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__convPhPfS0_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4convPhPfS0_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream>
#include <chrono>
#include <functional>
#include <cmath>
#include <stdio.h>

using namespace std::chrono;

typedef std::function<void*()> func;

class Matrix;

// Kernel forward declaration; defined below the class that launches it.
__global__ void cudaMatMulClass(const int* A, int an, int am,
                                const int* B, int bn, int bm, int* out);

// Dense row-major integer matrix backed by CUDA unified memory.
// m_dimY = number of rows, m_dimX = number of columns.
class Matrix {
private:
    int m_dimX;
    int m_dimY;
    int* m_content;  // unified-memory buffer of m_dimY * m_dimX ints

    // Release the unified-memory buffer, if any.
    void deleteContent() {
        if (m_content != NULL) {
            cudaFree(m_content);
            m_content = NULL;
        }
    }

    // Proxy returned by operator[] so that M[i][j] reads row i, column j.
    class helper {
    public:
        int& operator[](int j) {
            // Row-major layout: element (i, j) lives at i * columns + j.
            // Fixed: the original multiplied by dimY() (the row count),
            // which is only correct for square matrices.
            return m_M->m_content[m_i * (m_M->dimX()) + j];
        }
        helper(const Matrix* m, int i) : m_M(m), m_i(i) {}
    private:
        const Matrix* m_M;
        int m_i;
    };

public:
    Matrix() : m_dimX(-1), m_dimY(-1), m_content(NULL) {}

    // dy rows x dx columns, every element initialized to val.
    // NOTE(review): cudaMallocManaged return code is not checked.
    Matrix(int dy, int dx, int val) : m_dimX(dx), m_dimY(dy) {
        cudaMallocManaged(&m_content, (size_t)m_dimX * m_dimY * sizeof(int));
        for (int i = 0; i < m_dimY * m_dimX; i++) {
            m_content[i] = val;
        }
    }

    // dy rows x dx columns, contents left uninitialized.
    Matrix(int dy, int dx) : m_dimX(dx), m_dimY(dy) {
        cudaMallocManaged(&m_content, (size_t)m_dimX * m_dimY * sizeof(int));
    }

    int nElem() { return m_dimX * m_dimY; }
    int dimX() const { return m_dimX; }
    int dimY() const { return m_dimY; }

    // Shallow assignment: frees our buffer and adopts the other matrix's
    // pointer. Both objects then alias one allocation — safe only because
    // the class has no destructor that could free it twice.
    Matrix& operator=(const Matrix& other) {
        if (this != &other) {
            this->deleteContent();
            this->m_content = other.m_content;
            this->m_dimX = other.m_dimX;
            this->m_dimY = other.m_dimY;
        }
        return *this;
    }

    // Print the full matrix followed by its (rows, cols) shape.
    void print() {
        for (int i = 0; i < m_dimY; i++) {
            for (int j = 0; j < m_dimX; j++) {
                std::cout << (*this)[i][j] << ",";
            }
            std::cout << std::endl;
        }
        std::cout << "(" << m_dimY << "," << m_dimX << ")" << std::endl;
    }

    helper operator[](const int i) const { return helper(this, i); }

    // GPU matrix product: returns this * other.
    // Throws 0 when the inner dimensions do not match.
    Matrix matMul(const Matrix& other) const {
        if (this->m_dimX != other.m_dimY) {
            std::cout << "dimenstion don't fit " << std::endl;
            throw 0;
        }
        int blockSize = 256;
        // Ceil-div: one thread per output element.
        int numBlocks = ((m_dimY * other.m_dimX) + blockSize - 1) / blockSize;
        Matrix ret(m_dimY, other.m_dimX);
        cudaMatMulClass<<<numBlocks, blockSize>>>(
            m_content, m_dimY, m_dimX,
            other.m_content, other.dimY(), other.dimX(),
            ret.m_content);
        cudaDeviceSynchronize();  // kernel must finish before host reads ret
        return ret;
    }
};

// Flat-buffer kernel: out (an x bm) = A (an x am) * B (bn x bm), row-major.
// Grid-stride loop over the flattened output, so any launch config works.
__global__ void cudaMatMulClass(const int* A, int an, int am,
                                const int* B, int bn, int bm, int* out) {
    int i, j, ij, k;
    int ijS = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;  // total threads in the grid
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        // Debug output from a single thread only.
        printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n",
               blockIdx.x, blockDim.x, threadIdx.x, gridDim.x);
        printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n",
               blockIdx.y, blockDim.y, threadIdx.y, gridDim.y);
        printf("ijS= %d ,stride= %d \n", ijS, stride);
    }
    for (ij = ijS; ij < an * bm; ij += stride) {
        i = ij / bm;  // output row
        j = ij % bm;  // output column
        out[ij] = 0;
        for (k = 0; k < am; k++) {
            // Fixed: row-major strides are the COLUMN counts (am for A,
            // bm for B). The original used A[i*an + k] and B[k*bn + j],
            // which is wrong for any non-square matrix.
            out[ij] += A[i * am + k] * B[k * bm + j];
        }
    }
}

// Pointer-to-pointer kernel: out (an x bm) = A (an x am) * B (bn x bm),
// where each matrix is an array of row pointers. Grid-stride loop over the
// flattened output index.
__global__ void cudaMatMul(int** A, int an, int am,
                           int** B, int bn, int bm, int** out) {
    int i, j, ij, k;
    int ijS = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;  // total threads in the grid
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        // Debug output from a single thread only.
        printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n",
               blockIdx.x, blockDim.x, threadIdx.x, gridDim.x);
        printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n",
               blockIdx.y, blockDim.y, threadIdx.y, gridDim.y);
        printf("ijS= %d ,stride= %d \n", ijS, stride);
    }
    for (ij = ijS; ij < an * bm; ij += stride) {
        i = ij / bm;  // output row
        j = ij % bm;  // output column
        out[i][j] = 0;
        for (k = 0; k < am; k++) {
            out[i][j] += A[i][k] * B[k][j];
        }
    }
}

// Host wrapper: allocates the an x bm result (array of unified-memory row
// pointers), launches cudaMatMul, and blocks until it completes.
// Throws 0 when the inner dimensions do not match.
int** matMul(int** A, int an, int am, int** B, int bn, int bm) {
    if (am != bn) {
        std::cout << "dimenstion don't fit" << std::endl;
        throw 0;
    }
    int blockSize = 256;
    // Ceil-div: one thread per output element.
    int numBlocks = ((an * bm) + blockSize - 1) / blockSize;
    int i;
    int** out;
    // NOTE(review): cudaMallocManaged return codes are not checked.
    cudaMallocManaged(&out, an * sizeof(int*));
    for (i = 0; i < an; i++) {
        cudaMallocManaged(&out[i], bm * sizeof(int));
    }
    cudaMatMul<<<numBlocks, blockSize>>>(A, an, am, B, bn, bm, out);
    cudaDeviceSynchronize();  // result must be ready before host reads it
    return out;
}

// Allocate an an x am matrix (array of unified-memory row pointers) with
// every element set to value.
int** init(int an, int am, int value) {
    int** out;
    cudaMallocManaged(&out, an * sizeof(int*));
    for (int i = 0; i < an; i++) {
        cudaMallocManaged(&out[i], am * sizeof(int));
        for (int j = 0; j < am; j++) {
            out[i][j] = value;
        }
    }
    return out;
}

// Print an an x am pointer-to-pointer matrix followed by its shape.
void print(int** A, int an, int am) {
    std::cout << "A = " << std::endl;
    for (int i = 0; i < an; i++) {
        for (int j = 0; j < am; j++) {
            std::cout << A[i][j] << ",";
        }
        std::cout << std::endl;
    }
    std::cout << "(" << an << "," << am << ")" << std::endl;
}

// Run f, report its wall-clock duration in milliseconds, and return its
// result cast to retType.
template <class retType>
retType measureTime(func& f) {
    std::chrono::high_resolution_clock::time_point t1 =
        std::chrono::high_resolution_clock::now();
    void* result = f();
    std::chrono::high_resolution_clock::time_point t2 =
        std::chrono::high_resolution_clock::now();
    auto duration =
        std::chrono::duration_cast<milliseconds>(t2 - t1).count();
    std::cout << "execution took " << duration << " milliseconds" << std::endl;
    return (retType)result;
}

int main() {
    std::cout << "gpu version" << std::endl;

    // Pointer-to-pointer version, timed through a std::function wrapper.
    int** out;
    int** C = init(300, 500, 1);
    int** D = init(500, 900, 1);
    func f = [C, D]() { return (void*)matMul(C, 300, 500, D, 500, 900); };
    out = measureTime<int**>(f);
    std::cout << "the new one " << std::endl;
    //print(out, 300, 900);

    // Class-based flat-buffer version.
    Matrix MA(300, 500, 1);
    Matrix MB(500, 900, 1);
    Matrix MC = MA.matMul(MB);
    //MC.print();
    return 0;
}
.file "tmpxft_001535d8_00000000-6_GPUMatMul.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation, @function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation: .LFB5581: .cfi_startproc endbr64 testl %edx, %edx je .L2 cmpl $1, %edx je .L3 cmpl $2, %edx jne .L5 movdqu (%rsi), %xmm0 movups %xmm0, (%rdi) jmp .L5 .L2: leaq _ZTIZ4mainEUlvE_(%rip), %rax movq %rax, (%rdi) .L5: movl $0, %eax ret .L3: movq %rsi, (%rdi) jmp .L5 .cfi_endproc .LFE5581: .size _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation, .-_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB5042: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5042: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .text._ZNSt14_Function_baseD2Ev,"axG",@progbits,_ZNSt14_Function_baseD5Ev,comdat .align 2 .weak _ZNSt14_Function_baseD2Ev .type _ZNSt14_Function_baseD2Ev, @function _ZNSt14_Function_baseD2Ev: .LFB3784: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA3784 endbr64 movq 16(%rdi), %rax testq %rax, %rax je .L12 subq $8, %rsp .cfi_def_cfa_offset 16 movl $3, %edx movq %rdi, %rsi call *%rax addq $8, %rsp .cfi_def_cfa_offset 8 ret .L12: ret .cfi_endproc .LFE3784: .globl __gxx_personality_v0 .section .gcc_except_table._ZNSt14_Function_baseD2Ev,"aG",@progbits,_ZNSt14_Function_baseD5Ev,comdat .LLSDA3784: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE3784-.LLSDACSB3784 .LLSDACSB3784: .LLSDACSE3784: .section 
.text._ZNSt14_Function_baseD2Ev,"axG",@progbits,_ZNSt14_Function_baseD5Ev,comdat .size _ZNSt14_Function_baseD2Ev, .-_ZNSt14_Function_baseD2Ev .weak _ZNSt14_Function_baseD1Ev .set _ZNSt14_Function_baseD1Ev,_ZNSt14_Function_baseD2Ev .section .text._ZN6MatrixC2Eiii,"axG",@progbits,_ZN6MatrixC5Eiii,comdat .align 2 .weak _ZN6MatrixC2Eiii .type _ZN6MatrixC2Eiii, @function _ZN6MatrixC2Eiii: .LFB5016: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movl %ecx, %ebp movl %edx, (%rdi) movl %esi, 4(%rdi) imull %esi, %edx movslq %edx, %rsi salq $2, %rsi leaq 8(%rdi), %rdi movl $1, %edx call cudaMallocManaged@PLT movl 4(%rbx), %eax imull (%rbx), %eax testl %eax, %eax jle .L15 movl $0, %eax .L17: movq 8(%rbx), %rdx movl %ebp, (%rdx,%rax,4) addq $1, %rax movl 4(%rbx), %edx imull (%rbx), %edx cmpl %eax, %edx jg .L17 .L15: addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5016: .size _ZN6MatrixC2Eiii, .-_ZN6MatrixC2Eiii .weak _ZN6MatrixC1Eiii .set _ZN6MatrixC1Eiii,_ZN6MatrixC2Eiii .text .globl _Z4initiii .type _Z4initiii, @function _Z4initiii: .LFB5030: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $16, %rsp .cfi_def_cfa_offset 64 movl %edi, %ebx movl %esi, %r14d movl %edx, %r12d movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movslq %edi, %r13 salq $3, %r13 movq %rsp, %rdi movl $1, %edx movq %r13, %rsi call cudaMallocManaged@PLT testl %ebx, %ebx jle .L21 movslq %r14d, %rbp salq $2, %rbp movl $0, %ebx .L24: movq %rbx, %rdi addq (%rsp), %rdi movl $1, %edx movq %rbp, %rsi call cudaMallocManaged@PLT testl %r14d, %r14d jle .L22 movl 
$0, %eax .L23: movq (%rsp), %rdx movq (%rdx,%rbx), %rdx movl %r12d, (%rdx,%rax) addq $4, %rax cmpq %rax, %rbp jne .L23 .L22: addq $8, %rbx cmpq %rbx, %r13 jne .L24 .L21: movq (%rsp), %rax movq 8(%rsp), %rdx subq %fs:40, %rdx jne .L29 addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE5030: .size _Z4initiii, .-_Z4initiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "A = " .LC1: .string "," .LC2: .string "(" .LC3: .string ")" .text .globl _Z5printPPiii .type _Z5printPPiii, @function _Z5printPPiii: .LFB5031: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movq %rdi, %rbx movl %esi, 12(%rsp) movl %edx, %r15d movl $4, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rbp movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbp testq %rbp, %rbp je .L48 cmpb $0, 56(%rbp) je .L32 movzbl 67(%rbp), %eax .L33: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl 12(%rsp), %eax testl %eax, %eax jle .L34 movq %rbx, %r12 cltq leaq (%rbx,%rax,8), %rax movq %rax, (%rsp) movslq %r15d, %r13 salq $2, %r13 leaq _ZSt4cout(%rip), %rbp leaq .LC1(%rip), %r14 jmp .L35 .L48: call _ZSt16__throw_bad_castv@PLT .L32: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) jmp 
.L33 .L49: call _ZSt16__throw_bad_castv@PLT .L50: movzbl 67(%rbx), %esi .L39: movsbl %sil, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $8, %r12 cmpq %r12, (%rsp) je .L34 .L35: movl $0, %ebx testl %r15d, %r15d jle .L40 .L36: movq (%r12), %rax movl (%rax,%rbx), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi movl $1, %edx movq %r14, %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT addq $4, %rbx cmpq %r13, %rbx jne .L36 .L40: movq 0(%rbp), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbx testq %rbx, %rbx je .L49 cmpb $0, 56(%rbx) jne .L50 movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L39 .L34: movl $1, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 12(%rsp), %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $1, %edx leaq .LC1(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r15d, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $1, %edx leaq .LC3(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L51 cmpb $0, 56(%rbp) je .L42 movzbl 67(%rbp), %eax .L43: movsbl %al, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $24, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L51: .cfi_restore_state call _ZSt16__throw_bad_castv@PLT .L42: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 
0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) jmp .L43 .cfi_endproc .LFE5031: .size _Z5printPPiii, .-_Z5printPPiii .globl _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi .type _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi, @function _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi: .LFB5064: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movl %edx, 32(%rsp) movq %rcx, 24(%rsp) movl %r8d, 20(%rsp) movl %r9d, 16(%rsp) movq 192(%rsp), %rax movq %rax, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 32(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 20(%rsp), %rax movq %rax, 144(%rsp) leaq 16(%rsp), %rax movq %rax, 152(%rsp) leaq 8(%rsp), %rax movq %rax, 160(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L56 .L52: movq 168(%rsp), %rax subq %fs:40, %rax jne .L57 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z15cudaMatMulClassPKiiiS0_iiPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L52 .L57: call __stack_chk_fail@PLT .cfi_endproc .LFE5064: .size _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi, .-_Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi .globl _Z15cudaMatMulClassPKiiiS0_iiPi .type _Z15cudaMatMulClassPKiiiS0_iiPi, @function _Z15cudaMatMulClassPKiiiS0_iiPi: .LFB5065: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 pushq 
24(%rsp) .cfi_def_cfa_offset 32 call _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5065: .size _Z15cudaMatMulClassPKiiiS0_iiPi, .-_Z15cudaMatMulClassPKiiiS0_iiPi .section .rodata._ZNK6Matrix6matMulERKS_.str1.1,"aMS",@progbits,1 .LC4: .string "dimenstion don't fit " .section .text._ZNK6Matrix6matMulERKS_,"axG",@progbits,_ZNK6Matrix6matMulERKS_,comdat .align 2 .weak _ZNK6Matrix6matMulERKS_ .type _ZNK6Matrix6matMulERKS_, @function _ZNK6Matrix6matMulERKS_: .LFB5027: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $64, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl 4(%rsi), %eax cmpl %eax, (%rdi) jne .L66 movq %rdi, %r12 movq %rsi, %rbp movl 4(%rdi), %eax movl (%rsi), %edx movl %eax, %ebx imull %edx, %ebx movl %edx, 32(%rsp) movl %eax, 36(%rsp) movslq %ebx, %rsi salq $2, %rsi leaq 40(%rsp), %rdi movl $1, %edx call cudaMallocManaged@PLT movl $256, 20(%rsp) movl $1, 24(%rsp) leal 510(%rbx), %eax addl $255, %ebx cmovns %ebx, %eax sarl $8, %eax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L67 .L63: call cudaDeviceSynchronize@PLT movq 32(%rsp), %rax movq 40(%rsp), %rdx movq 56(%rsp), %rcx subq %fs:40, %rcx jne .L68 addq $64, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L66: .cfi_restore_state leaq .LC4(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $4, %edi call __cxa_allocate_exception@PLT movq %rax, %rdi movl $0, (%rax) movq 56(%rsp), 
%rax subq %fs:40, %rax jne .L69 movl $0, %edx leaq _ZTIi(%rip), %rsi call __cxa_throw@PLT .L69: call __stack_chk_fail@PLT .L67: movq 8(%rbp), %rcx movl (%r12), %edx movl 4(%r12), %esi movq 8(%r12), %rdi subq $8, %rsp .cfi_def_cfa_offset 104 pushq 48(%rsp) .cfi_def_cfa_offset 112 movl 0(%rbp), %r9d movl 4(%rbp), %r8d call _Z45__device_stub__Z15cudaMatMulClassPKiiiS0_iiPiPKiiiS0_iiPi addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L63 .L68: call __stack_chk_fail@PLT .cfi_endproc .LFE5027: .size _ZNK6Matrix6matMulERKS_, .-_ZNK6Matrix6matMulERKS_ .text .globl _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_ .type _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_, @function _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_: .LFB5066: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movl %edx, 32(%rsp) movq %rcx, 24(%rsp) movl %r8d, 20(%rsp) movl %r9d, 16(%rsp) movq 192(%rsp), %rax movq %rax, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 32(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 20(%rsp), %rax movq %rax, 144(%rsp) leaq 16(%rsp), %rax movq %rax, 152(%rsp) leaq 8(%rsp), %rax movq %rax, 160(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L74 .L70: movq 168(%rsp), %rax subq %fs:40, %rax jne .L75 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L74: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z10cudaMatMulPPiiiS0_iiS0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp 
.cfi_def_cfa_offset 192 jmp .L70 .L75: call __stack_chk_fail@PLT .cfi_endproc .LFE5066: .size _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_, .-_Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_ .globl _Z10cudaMatMulPPiiiS0_iiS0_ .type _Z10cudaMatMulPPiiiS0_iiS0_, @function _Z10cudaMatMulPPiiiS0_iiS0_: .LFB5067: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 pushq 24(%rsp) .cfi_def_cfa_offset 32 call _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_ addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5067: .size _Z10cudaMatMulPPiiiS0_iiS0_, .-_Z10cudaMatMulPPiiiS0_iiS0_ .section .rodata.str1.1 .LC5: .string "dimenstion don't fit" .text .globl _Z6matMulPPiiiS0_ii .type _Z6matMulPPiiiS0_ii, @function _Z6matMulPPiiiS0_ii: .LFB5028: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rdi, 8(%rsp) movl %edx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 28(%rsp) movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax cmpl %r8d, %edx jne .L87 movl %esi, %r13d movl %r9d, %r15d movl %esi, %eax imull %r9d, %eax leal 510(%rax), %r14d addl $255, %eax cmovns %eax, %r14d sarl $8, %r14d movslq %esi, %rbp salq $3, %rbp leaq 40(%rsp), %rdi movl $1, %edx movq %rbp, %rsi call cudaMallocManaged@PLT testl %r13d, %r13d jle .L81 movslq %r15d, %r12 salq $2, %r12 movl $0, %ebx .L82: movq %rbx, %rdi addq 40(%rsp), %rdi movl $1, %edx movq %r12, %rsi call cudaMallocManaged@PLT addq $8, %rbx cmpq %rbx, %rbp jne .L82 .L81: movl $256, 60(%rsp) movl $1, 64(%rsp) movl %r14d, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call 
__cudaPushCallConfiguration@PLT testl %eax, %eax je .L88 .L83: call cudaDeviceSynchronize@PLT movq 40(%rsp), %rax movq 72(%rsp), %rdx subq %fs:40, %rdx jne .L89 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L87: .cfi_restore_state leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $4, %edi call __cxa_allocate_exception@PLT movq %rax, %rdi movl $0, (%rax) movq 72(%rsp), %rax subq %fs:40, %rax jne .L90 movl $0, %edx leaq _ZTIi(%rip), %rsi call __cxa_throw@PLT .L90: call __stack_chk_fail@PLT .L88: subq $8, %rsp .cfi_def_cfa_offset 152 pushq 48(%rsp) .cfi_def_cfa_offset 160 movl %r15d, %r9d movl 44(%rsp), %r8d movq 32(%rsp), %rcx movl 40(%rsp), %edx movl %r13d, %esi movq 24(%rsp), %rdi call _Z41__device_stub__Z10cudaMatMulPPiiiS0_iiS0_PPiiiS0_iiS0_ addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L83 .L89: call __stack_chk_fail@PLT .cfi_endproc .LFE5028: .size _Z6matMulPPiiiS0_ii, .-_Z6matMulPPiiiS0_ii .type _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data, @function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data: .LFB5577: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq 8(%rdi), %rcx movq (%rdi), %rdi movl $900, %r9d movl $500, %r8d movl $500, %edx movl $300, %esi call _Z6matMulPPiiiS0_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5577: .size _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data, .-_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data .section .rodata.str1.1 .LC6: .string "_Z10cudaMatMulPPiiiS0_iiS0_" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string 
"_Z15cudaMatMulClassPKiiiS0_iiPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB5069: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z10cudaMatMulPPiiiS0_iiS0_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z15cudaMatMulClassPKiiiS0_iiPi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE5069: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .rodata._Z11measureTimeIPPiET_RSt8functionIFPvvEE.str1.1,"aMS",@progbits,1 .LC8: .string "execution took " .LC9: .string " milliseconds" .section .text._Z11measureTimeIPPiET_RSt8functionIFPvvEE,"axG",@progbits,_Z11measureTimeIPPiET_RSt8functionIFPvvEE,comdat .weak _Z11measureTimeIPPiET_RSt8functionIFPvvEE .type _Z11measureTimeIPPiET_RSt8functionIFPvvEE, @function _Z11measureTimeIPPiET_RSt8functionIFPvvEE: .LFB5415: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbx call 
_ZNSt6chrono3_V212system_clock3nowEv@PLT cmpq $0, 16(%rbx) je .L101 movq %rax, %r12 movq %rbx, %rdi call *24(%rbx) movq %rax, %rbp call _ZNSt6chrono3_V212system_clock3nowEv@PLT subq %r12, %rax movq %rax, %rcx movabsq $4835703278458516699, %rdx imulq %rdx sarq $18, %rdx sarq $63, %rcx subq %rcx, %rdx movq %rdx, %rbx movl $15, %edx leaq .LC8(%rip), %rsi leaq _ZSt4cout(%rip), %r12 movq %r12, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq %rbx, %rsi movq %r12, %rdi call _ZNSo9_M_insertIlEERSoT_@PLT movq %rax, %rbx movl $13, %edx leaq .LC9(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r12 testq %r12, %r12 je .L102 cmpb $0, 56(%r12) je .L98 movzbl 67(%r12), %esi .L99: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq %rbp, %rax popq %rbx .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L101: .cfi_restore_state call _ZSt25__throw_bad_function_callv@PLT .L102: call _ZSt16__throw_bad_castv@PLT .L98: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L99 .cfi_endproc .LFE5415: .size _Z11measureTimeIPPiET_RSt8functionIFPvvEE, .-_Z11measureTimeIPPiET_RSt8functionIFPvvEE .section .rodata.str1.1 .LC10: .string "gpu version" .LC11: .string "the new one " .text .globl main .type main, @function main: .LFB5033: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA5033 endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $80, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax leaq .LC10(%rip), %rsi leaq _ZSt4cout(%rip), %rdi .LEHB0: call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call 
_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $1, %edx movl $500, %esi movl $300, %edi call _Z4initiii movq %rax, %rbx movl $1, %edx movl $900, %esi movl $500, %edi call _Z4initiii .LEHE0: movq %rbx, 32(%rsp) movq %rax, 40(%rsp) leaq _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data(%rip), %rax movq %rax, 56(%rsp) leaq _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation(%rip), %rax movq %rax, 48(%rsp) leaq 32(%rsp), %rdi .LEHB1: call _Z11measureTimeIPPiET_RSt8functionIFPvvEE leaq .LC11(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq %rsp, %rdi movl $1, %ecx movl $500, %edx movl $300, %esi call _ZN6MatrixC1Eiii leaq 16(%rsp), %rdi movl $1, %ecx movl $900, %edx movl $500, %esi call _ZN6MatrixC1Eiii leaq 16(%rsp), %rsi movq %rsp, %rdi call _ZNK6Matrix6matMulERKS_ .LEHE1: leaq 32(%rsp), %rdi call _ZNSt14_Function_baseD2Ev movq 72(%rsp), %rax subq %fs:40, %rax jne .L110 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L107: .cfi_restore_state endbr64 movq %rax, %rbx leaq 32(%rsp), %rdi call _ZNSt14_Function_baseD2Ev movq 72(%rsp), %rax subq %fs:40, %rax je .L105 call __stack_chk_fail@PLT .L105: movq %rbx, %rdi .LEHB2: call _Unwind_Resume@PLT .LEHE2: .L110: call __stack_chk_fail@PLT .cfi_endproc .LFE5033: .section .gcc_except_table,"a",@progbits .LLSDA5033: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE5033-.LLSDACSB5033 .LLSDACSB5033: .uleb128 .LEHB0-.LFB5033 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB5033 .uleb128 .LEHE1-.LEHB1 .uleb128 .L107-.LFB5033 .uleb128 0 .uleb128 .LEHB2-.LFB5033 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .LLSDACSE5033: .text .size main, .-main .section .data.rel.ro,"aw" .align 8 .type _ZTIZ4mainEUlvE_, @object .size 
_ZTIZ4mainEUlvE_, 16 _ZTIZ4mainEUlvE_: .quad _ZTVN10__cxxabiv117__class_type_infoE+16 .quad _ZTSZ4mainEUlvE_ .section .rodata .align 8 .type _ZTSZ4mainEUlvE_, @object .size _ZTSZ4mainEUlvE_, 14 _ZTSZ4mainEUlvE_: .string "*Z4mainEUlvE_" .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <chrono> #include <functional> #include <cmath> #include <stdio.h> using namespace std::chrono; typedef std::function<void*() > func; class Matrix; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out); class Matrix{ private: int m_dimX; int m_dimY; int* m_content; void deleteContent(){ if(m_content != NULL){ cudaFree(m_content); m_content = NULL; } } class helper{ public: int& operator[](int j){ return m_M->m_content[m_i*(m_M->dimY()) +j]; } helper(const Matrix* m, int i): m_M(m), m_i(i){} private: const Matrix* m_M; int m_i; }; // class helperC{ // public: // int operator[](int j){ // return m_M->m_content[m_i*j]; // } // helper(const Matrix* m, int i): m_M(m), m_i(i){} // private: // const Matrix* m_M; // int m_i; // }; public: Matrix():m_dimX(-1), m_dimY(-1), m_content(NULL){} Matrix(int dy, int dx, int val): m_dimX(dx), m_dimY(dy){ cudaMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); for(int i=0; i<(m_dimY*m_dimX); i++){ m_content[i] = val; } } Matrix(int dy, int dx): m_dimX(dx), m_dimY(dy){ cudaMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); } int nElem(){return m_dimX*m_dimY;} int dimX() const{return m_dimX;} int dimY() const{return m_dimY;} Matrix& operator=(const Matrix& other){ if(this != &other){ this->deleteContent(); this->m_content = other.m_content; this->m_dimX = other.m_dimX; this->m_dimY = other.m_dimY; } return *this; } void print(){ //TODO for(int i=0; i<m_dimY; i++){ for(int j=0; j<m_dimX; j++){ std::cout << (*this)[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<m_dimY<<","<<m_dimX<<")"<<std::endl; } helper operator[] (const int i) const{ return helper(this, i); } Matrix matMul(const Matrix& other) const{ if( this->m_dimX != other.m_dimY ){ std::cout << "dimenstion don't fit " << std::endl; throw 0; } int blockSize = 256; int numBlocks = ((m_dimY * other.m_dimX) + blockSize - 1) / blockSize; Matrix ret(m_dimY, other.m_dimX); 
cudaMatMulClass<<<numBlocks,blockSize>>>(m_content, m_dimY, m_dimX, other.m_content, other.dimY(), other.dimX(), ret.m_content); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); cudaDeviceSynchronize(); return ret; } }; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[ij]=0; for(k=0; k<am; k++){ out[ij] += A[i*an + k]*B[k*bn +j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } __global__ void cudaMatMul(int** A, int an, int am, int** B, int bn, int bm, int** out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } //std::cout << "blockInd.x="<< blockIdx.x << " blockDim.x=" << blockDim.x << " threadIdx.x=" << threadIdx.x 
<< " GridDim.x" << gridDim.x << std::endl; //std::cout << "blockInd.y="<< blockIdx.y << " blockDim.y=" << blockDim.y << " threadIdx.y=" << threadIdx.y << " GridDim.y" << gridDim.y << std::endl; //for(i=iS; i < an; i+=iStride){ // for(j=0; j<bm; j++){ // // go threw all fields in output // // initialize output field as 0 // out[i][j]=0; // // for(k=0; k < am; k++){ // out[i][j] += A[i][k] * B[k][j]; // } // } //} //version with single loop for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[i][j]=0; for(k=0; k<am; k++){ out[i][j] += A[i][k]*B[k][j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } int** matMul(int** A, int an, int am, int** B, int bn, int bm){ if( am != bn ){ std::cout << "dimenstion don't fit" << std::endl; throw 0; } int N = std::max(an, bm); int blockSize = 256; int numBlocks = ((an * bm) + blockSize - 1) / blockSize; int i; int **out; //out = new int*[an]; cudaMallocManaged(&out, an*sizeof(int*)); for(i=0; i < an ; i++){ //out[i] = new int[bm]; cudaMallocManaged(&out[i], bm*sizeof(int)); } cudaMatMul<<<numBlocks,blockSize>>>(A, an, am, B, bn, bm, out); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); cudaDeviceSynchronize(); return out; } /// initialize matrix int** init(int an, int am, int value){ //int** out = new int*[an]; int** out; cudaMallocManaged(&out, an*sizeof(int*)); for(int i=0; i<an; i++){ //out[i] = new int[am]; cudaMallocManaged(&out[i], am*sizeof(int)); for(int j=0; j<am; j++){ out[i][j] = value; } } return out; } void print(int** A, int an, int am){ std::cout << "A = " << std::endl; for(int i=0; i<an; i++){ for(int j=0; j<am; j++){ std::cout << A[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<an<<","<<am<<")"<<std::endl; } template <class retType> retType measureTime(func& f){ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); void* result = f(); std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); auto 
duration = std::chrono::duration_cast<milliseconds>( t2 - t1 ).count(); std::cout << "execution took " << duration << " milliseconds" << std::endl; return (retType)result; } int main(){ std::cout << "gpu version" << std::endl; int A[2][2]={{1,2},{3,4}}; int B[2][3]={{1,1,1},{1,1,1}}; int **out; int **C = init(300, 500, 1); int **D = init(500, 900, 1); func f = [C,D](){return (void*)matMul(C, 300, 500, D, 500, 900);}; out=measureTime<int**>(f); std::cout << "the new one " << std:: endl; //print(out, 300, 900); Matrix MA(300,500,1); Matrix MB(500,900,1); //func f = [A,B](){return (void*)A.matMul(B);} //Matrix C = measureTime<Matrix>(f); Matrix MC = MA.matMul(MB); //MC.print(); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <chrono> #include <functional> #include <cmath> #include <stdio.h> using namespace std::chrono; typedef std::function<void*() > func; class Matrix; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out); class Matrix{ private: int m_dimX; int m_dimY; int* m_content; void deleteContent(){ if(m_content != NULL){ hipFree(m_content); m_content = NULL; } } class helper{ public: int& operator[](int j){ return m_M->m_content[m_i*(m_M->dimY()) +j]; } helper(const Matrix* m, int i): m_M(m), m_i(i){} private: const Matrix* m_M; int m_i; }; // class helperC{ // public: // int operator[](int j){ // return m_M->m_content[m_i*j]; // } // helper(const Matrix* m, int i): m_M(m), m_i(i){} // private: // const Matrix* m_M; // int m_i; // }; public: Matrix():m_dimX(-1), m_dimY(-1), m_content(NULL){} Matrix(int dy, int dx, int val): m_dimX(dx), m_dimY(dy){ hipMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); for(int i=0; i<(m_dimY*m_dimX); i++){ m_content[i] = val; } } Matrix(int dy, int dx): m_dimX(dx), m_dimY(dy){ hipMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); } int nElem(){return m_dimX*m_dimY;} int dimX() const{return m_dimX;} int dimY() const{return m_dimY;} Matrix& operator=(const Matrix& other){ if(this != &other){ this->deleteContent(); this->m_content = other.m_content; this->m_dimX = other.m_dimX; this->m_dimY = other.m_dimY; } return *this; } void print(){ //TODO for(int i=0; i<m_dimY; i++){ for(int j=0; j<m_dimX; j++){ std::cout << (*this)[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<m_dimY<<","<<m_dimX<<")"<<std::endl; } helper operator[] (const int i) const{ return helper(this, i); } Matrix matMul(const Matrix& other) const{ if( this->m_dimX != other.m_dimY ){ std::cout << "dimenstion don't fit " << std::endl; throw 0; } int blockSize = 256; int numBlocks = ((m_dimY * other.m_dimX) + blockSize - 1) / blockSize; Matrix ret(m_dimY, 
other.m_dimX); cudaMatMulClass<<<numBlocks,blockSize>>>(m_content, m_dimY, m_dimX, other.m_content, other.dimY(), other.dimX(), ret.m_content); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); hipDeviceSynchronize(); return ret; } }; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[ij]=0; for(k=0; k<am; k++){ out[ij] += A[i*an + k]*B[k*bn +j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } __global__ void cudaMatMul(int** A, int an, int am, int** B, int bn, int bm, int** out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } //std::cout << "blockInd.x="<< blockIdx.x << " blockDim.x=" << blockDim.x << " threadIdx.x=" 
<< threadIdx.x << " GridDim.x" << gridDim.x << std::endl; //std::cout << "blockInd.y="<< blockIdx.y << " blockDim.y=" << blockDim.y << " threadIdx.y=" << threadIdx.y << " GridDim.y" << gridDim.y << std::endl; //for(i=iS; i < an; i+=iStride){ // for(j=0; j<bm; j++){ // // go threw all fields in output // // initialize output field as 0 // out[i][j]=0; // // for(k=0; k < am; k++){ // out[i][j] += A[i][k] * B[k][j]; // } // } //} //version with single loop for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[i][j]=0; for(k=0; k<am; k++){ out[i][j] += A[i][k]*B[k][j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } int** matMul(int** A, int an, int am, int** B, int bn, int bm){ if( am != bn ){ std::cout << "dimenstion don't fit" << std::endl; throw 0; } int N = std::max(an, bm); int blockSize = 256; int numBlocks = ((an * bm) + blockSize - 1) / blockSize; int i; int **out; //out = new int*[an]; hipMallocManaged(&out, an*sizeof(int*)); for(i=0; i < an ; i++){ //out[i] = new int[bm]; hipMallocManaged(&out[i], bm*sizeof(int)); } cudaMatMul<<<numBlocks,blockSize>>>(A, an, am, B, bn, bm, out); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); hipDeviceSynchronize(); return out; } /// initialize matrix int** init(int an, int am, int value){ //int** out = new int*[an]; int** out; hipMallocManaged(&out, an*sizeof(int*)); for(int i=0; i<an; i++){ //out[i] = new int[am]; hipMallocManaged(&out[i], am*sizeof(int)); for(int j=0; j<am; j++){ out[i][j] = value; } } return out; } void print(int** A, int an, int am){ std::cout << "A = " << std::endl; for(int i=0; i<an; i++){ for(int j=0; j<am; j++){ std::cout << A[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<an<<","<<am<<")"<<std::endl; } template <class retType> retType measureTime(func& f){ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); void* result = f(); std::chrono::high_resolution_clock::time_point t2 = 
std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<milliseconds>( t2 - t1 ).count(); std::cout << "execution took " << duration << " milliseconds" << std::endl; return (retType)result; } int main(){ std::cout << "gpu version" << std::endl; int A[2][2]={{1,2},{3,4}}; int B[2][3]={{1,1,1},{1,1,1}}; int **out; int **C = init(300, 500, 1); int **D = init(500, 900, 1); func f = [C,D](){return (void*)matMul(C, 300, 500, D, 500, 900);}; out=measureTime<int**>(f); std::cout << "the new one " << std:: endl; //print(out, 300, 900); Matrix MA(300,500,1); Matrix MB(500,900,1); //func f = [A,B](){return (void*)A.matMul(B);} //Matrix C = measureTime<Matrix>(f); Matrix MC = MA.matMul(MB); //MC.print(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <chrono> #include <functional> #include <cmath> #include <stdio.h> using namespace std::chrono; typedef std::function<void*() > func; class Matrix; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out); class Matrix{ private: int m_dimX; int m_dimY; int* m_content; void deleteContent(){ if(m_content != NULL){ hipFree(m_content); m_content = NULL; } } class helper{ public: int& operator[](int j){ return m_M->m_content[m_i*(m_M->dimY()) +j]; } helper(const Matrix* m, int i): m_M(m), m_i(i){} private: const Matrix* m_M; int m_i; }; // class helperC{ // public: // int operator[](int j){ // return m_M->m_content[m_i*j]; // } // helper(const Matrix* m, int i): m_M(m), m_i(i){} // private: // const Matrix* m_M; // int m_i; // }; public: Matrix():m_dimX(-1), m_dimY(-1), m_content(NULL){} Matrix(int dy, int dx, int val): m_dimX(dx), m_dimY(dy){ hipMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); for(int i=0; i<(m_dimY*m_dimX); i++){ m_content[i] = val; } } Matrix(int dy, int dx): m_dimX(dx), m_dimY(dy){ hipMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int)); } int nElem(){return m_dimX*m_dimY;} int dimX() const{return m_dimX;} int dimY() const{return m_dimY;} Matrix& operator=(const Matrix& other){ if(this != &other){ this->deleteContent(); this->m_content = other.m_content; this->m_dimX = other.m_dimX; this->m_dimY = other.m_dimY; } return *this; } void print(){ //TODO for(int i=0; i<m_dimY; i++){ for(int j=0; j<m_dimX; j++){ std::cout << (*this)[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<m_dimY<<","<<m_dimX<<")"<<std::endl; } helper operator[] (const int i) const{ return helper(this, i); } Matrix matMul(const Matrix& other) const{ if( this->m_dimX != other.m_dimY ){ std::cout << "dimenstion don't fit " << std::endl; throw 0; } int blockSize = 256; int numBlocks = ((m_dimY * other.m_dimX) + blockSize - 1) / blockSize; Matrix ret(m_dimY, 
other.m_dimX); cudaMatMulClass<<<numBlocks,blockSize>>>(m_content, m_dimY, m_dimX, other.m_content, other.dimY(), other.dimX(), ret.m_content); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); hipDeviceSynchronize(); return ret; } }; __global__ void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[ij]=0; for(k=0; k<am; k++){ out[ij] += A[i*an + k]*B[k*bn +j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } __global__ void cudaMatMul(int** A, int an, int am, int** B, int bn, int bm, int** out){ int i,j,ij, k; int ijS = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid; //int jS = blockIdx.y * blockDim.y + threadIdx.y; //int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid; if(blockIdx.x==0 && threadIdx.x==0){ printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x); printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y); printf("ijS= %d ,stride= %d \n", ijS, stride); } //std::cout << "blockInd.x="<< blockIdx.x << " blockDim.x=" << blockDim.x << " threadIdx.x=" 
<< threadIdx.x << " GridDim.x" << gridDim.x << std::endl; //std::cout << "blockInd.y="<< blockIdx.y << " blockDim.y=" << blockDim.y << " threadIdx.y=" << threadIdx.y << " GridDim.y" << gridDim.y << std::endl; //for(i=iS; i < an; i+=iStride){ // for(j=0; j<bm; j++){ // // go threw all fields in output // // initialize output field as 0 // out[i][j]=0; // // for(k=0; k < am; k++){ // out[i][j] += A[i][k] * B[k][j]; // } // } //} //version with single loop for(ij=ijS; ij < an*bm; ij+=stride){ i = ij/bm; j = ij%bm; out[i][j]=0; for(k=0; k<am; k++){ out[i][j] += A[i][k]*B[k][j]; } //printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]); } } int** matMul(int** A, int an, int am, int** B, int bn, int bm){ if( am != bn ){ std::cout << "dimenstion don't fit" << std::endl; throw 0; } int N = std::max(an, bm); int blockSize = 256; int numBlocks = ((an * bm) + blockSize - 1) / blockSize; int i; int **out; //out = new int*[an]; hipMallocManaged(&out, an*sizeof(int*)); for(i=0; i < an ; i++){ //out[i] = new int[bm]; hipMallocManaged(&out[i], bm*sizeof(int)); } cudaMatMul<<<numBlocks,blockSize>>>(A, an, am, B, bn, bm, out); //cudaMatMul<<<(2,2),(3,3)>>>(A, an, am, B, bn, bm, out); hipDeviceSynchronize(); return out; } /// initialize matrix int** init(int an, int am, int value){ //int** out = new int*[an]; int** out; hipMallocManaged(&out, an*sizeof(int*)); for(int i=0; i<an; i++){ //out[i] = new int[am]; hipMallocManaged(&out[i], am*sizeof(int)); for(int j=0; j<am; j++){ out[i][j] = value; } } return out; } void print(int** A, int an, int am){ std::cout << "A = " << std::endl; for(int i=0; i<an; i++){ for(int j=0; j<am; j++){ std::cout << A[i][j] << ","; } std::cout << std::endl; } std::cout << "("<<an<<","<<am<<")"<<std::endl; } template <class retType> retType measureTime(func& f){ std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); void* result = f(); std::chrono::high_resolution_clock::time_point t2 = 
std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<milliseconds>( t2 - t1 ).count(); std::cout << "execution took " << duration << " milliseconds" << std::endl; return (retType)result; } int main(){ std::cout << "gpu version" << std::endl; int A[2][2]={{1,2},{3,4}}; int B[2][3]={{1,1,1},{1,1,1}}; int **out; int **C = init(300, 500, 1); int **D = init(500, 900, 1); func f = [C,D](){return (void*)matMul(C, 300, 500, D, 500, 900);}; out=measureTime<int**>(f); std::cout << "the new one " << std:: endl; //print(out, 300, 900); Matrix MA(300,500,1); Matrix MB(500,900,1); //func f = [A,B](){return (void*)A.matMul(B);} //Matrix C = measureTime<Matrix>(f); Matrix MC = MA.matMul(MB); //MC.print(); return 0; }
.text .file "GPUMatMul.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi # -- Begin function _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi .p2align 4, 0x90 .type _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi,@function _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi: # @_Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movl %edx, 8(%rsp) movq %rcx, 64(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 144(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z15cudaMatMulClassPKiiiS0_iiPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi, .Lfunc_end0-_Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi .cfi_endproc # -- End function .globl _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ # -- Begin function _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ .p2align 4, 0x90 .type _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_,@function _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_: # @_Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movl %edx, 8(%rsp) movq %rcx, 64(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) 
leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 144(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10cudaMatMulPPiiiS0_iiS0_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end1: .size _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_, .Lfunc_end1-_Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ .cfi_endproc # -- End function .globl _Z6matMulPPiiiS0_ii # -- Begin function _Z6matMulPPiiiS0_ii .p2align 4, 0x90 .type _Z6matMulPPiiiS0_ii,@function _Z6matMulPPiiiS0_ii: # @_Z6matMulPPiiiS0_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $184, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rcx, 48(%rsp) # 8-byte Spill movq %rdi, 40(%rsp) # 8-byte Spill movl %r8d, 20(%rsp) # 4-byte Spill movl %edx, 16(%rsp) # 4-byte Spill cmpl %r8d, %edx jne .LBB2_7 # %bb.1: movl %r9d, %ebx movl %esi, %r12d movl %r9d, %r13d imull %esi, %r13d leal 255(%r13), %eax addl $510, %r13d # imm = 0x1FE testl %eax, %eax cmovnsl %eax, %r13d sarl $8, %r13d movslq %esi, %r14 leaq (,%r14,8), %rsi leaq 8(%rsp), %rdi movl $1, %edx callq hipMallocManaged testl %r14d, %r14d jle .LBB2_4 # %bb.2: # %.lr.ph movslq %ebx, %r14 shlq $2, %r14 movl %r12d, %ebp shlq $3, %rbp xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_3: # =>This Inner Loop Header: Depth=1 movq 8(%rsp), %rdi addq %r15, %rdi movq %r14, 
%rsi movl $1, %edx callq hipMallocManaged addq $8, %r15 cmpq %r15, %rbp jne .LBB2_3 .LBB2_4: # %._crit_edge movl %r13d, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $256, %rdx # imm = 0x100 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_6 # %bb.5: movq 8(%rsp), %rax movq 40(%rsp), %rcx # 8-byte Reload movq %rcx, 120(%rsp) movl %r12d, 36(%rsp) movl 16(%rsp), %ecx # 4-byte Reload movl %ecx, 32(%rsp) movq 48(%rsp), %rcx # 8-byte Reload movq %rcx, 112(%rsp) movl 20(%rsp), %ecx # 4-byte Reload movl %ecx, 28(%rsp) movl %ebx, 24(%rsp) movq %rax, 104(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 32(%rsp), %rax movq %rax, 144(%rsp) leaq 112(%rsp), %rax movq %rax, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 160(%rsp) leaq 24(%rsp), %rax movq %rax, 168(%rsp) leaq 104(%rsp), %rax movq %rax, 176(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z10cudaMatMulPPiiiS0_iiS0_, %edi pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_6: callq hipDeviceSynchronize movq 8(%rsp), %rax addq $184, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_7: .cfi_def_cfa_offset 240 movl $_ZSt4cout, %edi movl $.L.str, %esi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $4, %edi callq __cxa_allocate_exception movl $0, (%rax) movl $_ZTIi, %esi movq %rax, %rdi xorl %edx, %edx callq __cxa_throw .Lfunc_end2: .size 
_Z6matMulPPiiiS0_ii, .Lfunc_end2-_Z6matMulPPiiiS0_ii .cfi_endproc # -- End function .globl _Z4initiii # -- Begin function _Z4initiii .p2align 4, 0x90 .type _Z4initiii,@function _Z4initiii: # @_Z4initiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, %ebx movl %esi, %ebp movl %edi, %r15d movslq %edi, %r14 leaq (,%r14,8), %rsi movq %rsp, %rdi movl $1, %edx callq hipMallocManaged testl %r14d, %r14d jle .LBB3_6 # %bb.1: # %.lr.ph16 movslq %ebp, %rax leaq (,%rax,4), %r14 movl %r15d, %r15d movl %eax, %r12d xorl %r13d, %r13d jmp .LBB3_2 .p2align 4, 0x90 .LBB3_5: # %._crit_edge # in Loop: Header=BB3_2 Depth=1 incq %r13 cmpq %r15, %r13 je .LBB3_6 .LBB3_2: # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 leaq (,%r13,8), %rdi addq (%rsp), %rdi movq %r14, %rsi movl $1, %edx callq hipMallocManaged testl %ebp, %ebp jle .LBB3_5 # %bb.3: # %.lr.ph # in Loop: Header=BB3_2 Depth=1 movq (%rsp), %rax movq (%rax,%r13,8), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_4: # Parent Loop BB3_2 Depth=1 # => This Inner Loop Header: Depth=2 movl %ebx, (%rax,%rcx,4) incq %rcx cmpq %rcx, %r12 jne .LBB3_4 jmp .LBB3_5 .LBB3_6: # %._crit_edge17 movq (%rsp), %rax addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z4initiii, .Lfunc_end3-_Z4initiii .cfi_endproc # -- End function .globl _Z5printPPiii # -- Begin function _Z5printPPiii .p2align 4, 0x90 .type _Z5printPPiii,@function _Z5printPPiii: # @_Z5printPPiii .cfi_startproc # %bb.0: pushq 
%rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, %ebx movl %esi, %ebp movq %rdi, %r14 movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $4, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r15 testq %r15, %r15 je .LBB4_19 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r15) je .LBB4_3 # %bb.2: movzbl 67(%r15), %eax jmp .LBB4_4 .LBB4_3: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB4_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl %ebp, 4(%rsp) # 4-byte Spill testl %ebp, %ebp jle .LBB4_12 # %bb.5: # %.preheader.lr.ph movl 4(%rsp), %r12d # 4-byte Reload movl %ebx, %r13d xorl %ebp, %ebp jmp .LBB4_6 .p2align 4, 0x90 .LBB4_17: # in Loop: Header=BB4_6 Depth=1 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl $10, %esi callq *48(%rax) .LBB4_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit21 # in Loop: Header=BB4_6 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv incq %rbp cmpq %r12, %rbp je .LBB4_12 .LBB4_6: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB4_8 Depth 2 testl %ebx, %ebx jle .LBB4_9 # %bb.7: # %.lr.ph # in Loop: Header=BB4_6 Depth=1 xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_8: # Parent Loop BB4_6 Depth=1 # => This Inner Loop Header: Depth=2 movq (%r14,%rbp,8), %rax movl 
(%rax,%r15,4), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l incq %r15 cmpq %r15, %r13 jne .LBB4_8 .LBB4_9: # %._crit_edge # in Loop: Header=BB4_6 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r15 testq %r15, %r15 je .LBB4_19 # %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i18 # in Loop: Header=BB4_6 Depth=1 cmpb $0, 56(%r15) je .LBB4_17 # %bb.11: # in Loop: Header=BB4_6 Depth=1 movzbl 67(%r15), %eax jmp .LBB4_18 .LBB4_12: # %._crit_edge24 movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $1, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl 4(%rsp), %esi # 4-byte Reload callq _ZNSolsEi movq %rax, %r14 movl $.L.str.2, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r14, %rdi movl %ebx, %esi callq _ZNSolsEi movq %rax, %rbx movl $.L.str.4, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB4_19 # %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i13 cmpb $0, 56(%r14) je .LBB4_15 # %bb.14: movzbl 67(%r14), %eax jmp .LBB4_16 .LBB4_15: movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB4_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit16 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc movq %rax, %rdi addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp _ZNSo5flushEv # TAILCALL .LBB4_19: .cfi_def_cfa_offset 64 
callq _ZSt16__throw_bad_castv .Lfunc_end4: .size _Z5printPPiii, .Lfunc_end4-_Z5printPPiii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $72, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $_ZSt4cout, %edi movl $.L.str.5, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB5_42 # %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB5_3 # %bb.2: movzbl 67(%rbx), %eax jmp .LBB5_4 .LBB5_3: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB5_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rsp, %rdi movl $2400, %esi # imm = 0x960 movl $1, %edx callq hipMallocManaged xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_5: # =>This Loop Header: Depth=1 # Child Loop BB5_6 Depth 2 leaq (,%r14,8), %rdi addq (%rsp), %rdi movl $2000, %esi # imm = 0x7D0 movl $1, %edx callq hipMallocManaged movq (%rsp), %rbx movq (%rbx,%r14,8), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB5_6: # Parent Loop BB5_5 Depth=1 # => This Inner Loop Header: Depth=2 movl $1, (%rax,%rcx,4) incq %rcx cmpq $500, %rcx # imm = 0x1F4 jne .LBB5_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB5_5 Depth=1 incq %r14 cmpq $300, %r14 # imm = 0x12C jne .LBB5_5 # %bb.8: # %_Z4initiii.exit movq %rsp, %rdi movl $4000, %esi # imm = 0xFA0 movl $1, %edx callq hipMallocManaged xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_9: # =>This Loop Header: Depth=1 # Child Loop BB5_10 Depth 2 leaq 
(,%r14,8), %rdi addq (%rsp), %rdi movl $3600, %esi # imm = 0xE10 movl $1, %edx callq hipMallocManaged movq (%rsp), %rax movq (%rax,%r14,8), %rcx xorl %edx, %edx .p2align 4, 0x90 .LBB5_10: # Parent Loop BB5_9 Depth=1 # => This Inner Loop Header: Depth=2 movl $1, (%rcx,%rdx,4) incq %rdx cmpq $900, %rdx # imm = 0x384 jne .LBB5_10 # %bb.11: # %._crit_edge.i20 # in Loop: Header=BB5_9 Depth=1 incq %r14 cmpq $500, %r14 # imm = 0x1F4 jne .LBB5_9 # %bb.12: # %_Z4initiii.exit23 xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movaps %xmm0, 16(%rsp) movq %rbx, (%rsp) movq %rax, 8(%rsp) movq $_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data, 24(%rsp) movq $_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation, 16(%rsp) .Ltmp0: movq %rsp, %rdi callq _Z11measureTimeIPPiET_RSt8functionIFPvvEE .Ltmp1: # %bb.13: .Ltmp2: movl $_ZSt4cout, %edi movl $.L.str.6, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp3: # %bb.14: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB5_15 # %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i36 cmpb $0, 56(%rbx) je .LBB5_19 # %bb.18: movzbl 67(%rbx), %eax jmp .LBB5_21 .LBB5_19: .Ltmp4: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp5: # %bb.20: # %.noexc40 movq (%rbx), %rax .Ltmp6: movq %rbx, %rdi movl $10, %esi callq *48(%rax) .Ltmp7: .LBB5_21: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i .Ltmp8: movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp9: # %bb.22: # %.noexc42 .Ltmp10: movq %rax, %rdi callq _ZNSo5flushEv .Ltmp11: # %bb.23: # %_ZNSolsEPFRSoS_E.exit movabsq $1288490189300, %rax # imm = 0x12C000001F4 movq %rax, 56(%rsp) leaq 64(%rsp), %rdi .Ltmp12: movl $600000, %esi # imm = 0x927C0 movl $1, %edx callq hipMallocManaged .Ltmp13: # %bb.24: # %.noexc movl 56(%rsp), %eax 
imull 60(%rsp), %eax testl %eax, %eax jle .LBB5_27 # %bb.25: # %.lr.ph.i movq 64(%rsp), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB5_26: # =>This Inner Loop Header: Depth=1 movl $1, (%rax,%rcx,4) incq %rcx movslq 60(%rsp), %rdx movslq 56(%rsp), %rsi imulq %rdx, %rsi cmpq %rsi, %rcx jl .LBB5_26 .LBB5_27: # %_ZN6MatrixC2Eiii.exit movabsq $2147483648900, %rax # imm = 0x1F400000384 movq %rax, 40(%rsp) leaq 48(%rsp), %rdi .Ltmp15: movl $1800000, %esi # imm = 0x1B7740 movl $1, %edx callq hipMallocManaged .Ltmp16: # %bb.28: # %.noexc31 movl 40(%rsp), %eax imull 44(%rsp), %eax testl %eax, %eax jle .LBB5_31 # %bb.29: # %.lr.ph.i28 movq 48(%rsp), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB5_30: # =>This Inner Loop Header: Depth=1 movl $1, (%rax,%rcx,4) incq %rcx movslq 44(%rsp), %rdx movslq 40(%rsp), %rsi imulq %rdx, %rsi cmpq %rsi, %rcx jl .LBB5_30 .LBB5_31: # %_ZN6MatrixC2Eiii.exit32 .Ltmp18: leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi callq _ZNK6Matrix6matMulERKS_ .Ltmp19: # %bb.32: movq 16(%rsp), %rax testq %rax, %rax je .LBB5_34 # %bb.33: .Ltmp21: movq %rsp, %rdi movq %rdi, %rsi movl $3, %edx callq *%rax .Ltmp22: .LBB5_34: # %_ZNSt14_Function_baseD2Ev.exit xorl %eax, %eax addq $72, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB5_42: .cfi_def_cfa_offset 96 callq _ZSt16__throw_bad_castv .LBB5_15: .Ltmp24: callq _ZSt16__throw_bad_castv .Ltmp25: # %bb.16: # %.noexc39 .LBB5_43: .Ltmp23: movq %rax, %rdi callq __clang_call_terminate .LBB5_38: .Ltmp20: jmp .LBB5_39 .LBB5_37: .Ltmp17: jmp .LBB5_39 .LBB5_36: .Ltmp14: jmp .LBB5_39 .LBB5_35: .Ltmp26: .LBB5_39: movq %rax, %rbx movq 16(%rsp), %rax testq %rax, %rax je .LBB5_41 # %bb.40: .Ltmp27: movq %rsp, %rdi movq %rdi, %rsi movl $3, %edx callq *%rax .Ltmp28: .LBB5_41: # %_ZNSt14_Function_baseD2Ev.exit34 movq %rbx, %rdi callq _Unwind_Resume@PLT .LBB5_44: .Ltmp29: movq %rax, %rdi callq __clang_call_terminate .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc .section 
.gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table5: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 3 # @TType Encoding = udata4 .uleb128 .Lttbase0-.Lttbaseref0 .Lttbaseref0: .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp11-.Ltmp0 # Call between .Ltmp0 and .Ltmp11 .uleb128 .Ltmp26-.Lfunc_begin0 # jumps to .Ltmp26 .byte 0 # On action: cleanup .uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp13-.Ltmp12 # Call between .Ltmp12 and .Ltmp13 .uleb128 .Ltmp14-.Lfunc_begin0 # jumps to .Ltmp14 .byte 0 # On action: cleanup .uleb128 .Ltmp15-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp16-.Ltmp15 # Call between .Ltmp15 and .Ltmp16 .uleb128 .Ltmp17-.Lfunc_begin0 # jumps to .Ltmp17 .byte 0 # On action: cleanup .uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp19-.Ltmp18 # Call between .Ltmp18 and .Ltmp19 .uleb128 .Ltmp20-.Lfunc_begin0 # jumps to .Ltmp20 .byte 0 # On action: cleanup .uleb128 .Ltmp21-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp22-.Ltmp21 # Call between .Ltmp21 and .Ltmp22 .uleb128 .Ltmp23-.Lfunc_begin0 # jumps to .Ltmp23 .byte 1 # On action: 1 .uleb128 .Ltmp22-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Ltmp24-.Ltmp22 # Call between .Ltmp22 and .Ltmp24 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp24-.Lfunc_begin0 # >> Call Site 8 << .uleb128 .Ltmp25-.Ltmp24 # Call between .Ltmp24 and .Ltmp25 .uleb128 .Ltmp26-.Lfunc_begin0 # jumps to .Ltmp26 .byte 0 # On action: cleanup .uleb128 .Ltmp27-.Lfunc_begin0 # >> Call Site 9 << .uleb128 .Ltmp28-.Ltmp27 # Call between .Ltmp27 and .Ltmp28 .uleb128 .Ltmp29-.Lfunc_begin0 # jumps to .Ltmp29 .byte 1 # On action: 1 .uleb128 .Ltmp28-.Lfunc_begin0 # >> Call Site 
10 << .uleb128 .Lfunc_end5-.Ltmp28 # Call between .Ltmp28 and .Lfunc_end5 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .byte 1 # >> Action Record 1 << # Catch TypeInfo 1 .byte 0 # No further actions .p2align 2, 0x0 # >> Catch TypeInfos << .long 0 # TypeInfo 1 .Lttbase0: .p2align 2, 0x0 # -- End function .section .text._Z11measureTimeIPPiET_RSt8functionIFPvvEE,"axG",@progbits,_Z11measureTimeIPPiET_RSt8functionIFPvvEE,comdat .weak _Z11measureTimeIPPiET_RSt8functionIFPvvEE # -- Begin function _Z11measureTimeIPPiET_RSt8functionIFPvvEE .p2align 4, 0x90 .type _Z11measureTimeIPPiET_RSt8functionIFPvvEE,@function _Z11measureTimeIPPiET_RSt8functionIFPvvEE: # @_Z11measureTimeIPPiET_RSt8functionIFPvvEE .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx callq _ZNSt6chrono3_V212system_clock3nowEv cmpq $0, 16(%rbx) je .LBB6_6 # %bb.1: # %_ZNKSt8functionIFPvvEEclEv.exit movq %rax, %r14 movq %rbx, %rdi callq *24(%rbx) movq %rax, %rbx callq _ZNSt6chrono3_V212system_clock3nowEv subq %r14, %rax movabsq $4835703278458516699, %rcx # imm = 0x431BDE82D7B634DB imulq %rcx movq %rdx, %r14 shrq $63, %r14 sarq $18, %rdx addq %rdx, %r14 movl $_ZSt4cout, %edi movl $.L.str.8, %esi movl $15, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movq %r14, %rsi callq _ZNSo9_M_insertIlEERSoT_ movq %rax, %r14 movl $.L.str.9, %esi movl $13, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r14), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB6_7 # %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r15) je .LBB6_4 # %bb.3: movzbl 67(%r15), %eax jmp .LBB6_5 .LBB6_4: movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r15), %rax movq %r15, %rdi movl 
$10, %esi callq *48(%rax) .LBB6_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rbx, %rax popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB6_6: .cfi_def_cfa_offset 32 callq _ZSt25__throw_bad_function_callv .LBB6_7: callq _ZSt16__throw_bad_castv .Lfunc_end6: .size _Z11measureTimeIPPiET_RSt8functionIFPvvEE, .Lfunc_end6-_Z11measureTimeIPPiET_RSt8functionIFPvvEE .cfi_endproc # -- End function .section .text._ZNK6Matrix6matMulERKS_,"axG",@progbits,_ZNK6Matrix6matMulERKS_,comdat .weak _ZNK6Matrix6matMulERKS_ # -- Begin function _ZNK6Matrix6matMulERKS_ .p2align 4, 0x90 .type _ZNK6Matrix6matMulERKS_,@function _ZNK6Matrix6matMulERKS_: # @_ZNK6Matrix6matMulERKS_ .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $176, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl (%rdi), %eax cmpl 4(%rsi), %eax jne .LBB7_4 # %bb.1: movq %rsi, %rbx movq %rdi, %r14 movl 4(%rdi), %eax movl (%rsi), %ecx movl %ecx, 24(%rsp) # kill: def $ecx killed $ecx def $rcx imull %eax, %ecx leal 255(%rcx), %edx leal 510(%rcx), %r15d testl %edx, %edx cmovnsl %edx, %r15d sarl $8, %r15d movl %eax, 28(%rsp) leaq 32(%rsp), %rdi movslq %ecx, %rsi shlq $2, %rsi movl $1, %edx callq hipMallocManaged movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %r15 orq $256, %rdx # imm = 0x100 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_3 # %bb.2: movq 8(%r14), %rax movl (%r14), %ecx movl 4(%r14), %edx movq 8(%rbx), %rsi movl (%rbx), %edi movl 4(%rbx), %r8d movq 32(%rsp), %r9 movq %rax, 104(%rsp) movl %edx, 20(%rsp) movl %ecx, 16(%rsp) movq %rsi, 96(%rsp) movl %r8d, 12(%rsp) movl %edi, 8(%rsp) movq %r9, 88(%rsp) leaq 104(%rsp), %rax 
movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 96(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z15cudaMatMulClassPKiiiS0_iiPi, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_3: callq hipDeviceSynchronize movq 24(%rsp), %rax movq 32(%rsp), %rdx addq $176, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB7_4: .cfi_def_cfa_offset 208 movl $_ZSt4cout, %edi movl $.L.str.7, %esi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $4, %edi callq __cxa_allocate_exception movl $0, (%rax) movl $_ZTIi, %esi movq %rax, %rdi xorl %edx, %edx callq __cxa_throw .Lfunc_end7: .size _ZNK6Matrix6matMulERKS_, .Lfunc_end7-_ZNK6Matrix6matMulERKS_ .cfi_endproc # -- End function .section .text.__clang_call_terminate,"axG",@progbits,__clang_call_terminate,comdat .hidden __clang_call_terminate # -- Begin function __clang_call_terminate .weak __clang_call_terminate .p2align 4, 0x90 .type __clang_call_terminate,@function __clang_call_terminate: # @__clang_call_terminate .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq __cxa_begin_catch callq _ZSt9terminatev .Lfunc_end8: .size __clang_call_terminate, .Lfunc_end8-__clang_call_terminate .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data .type 
_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data,@function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data: # @_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data .cfi_startproc # %bb.0: movq (%rdi), %rax movq 8(%rdi), %rcx movq %rax, %rdi movl $300, %esi # imm = 0x12C movl $500, %edx # imm = 0x1F4 movl $500, %r8d # imm = 0x1F4 movl $900, %r9d # imm = 0x384 jmp _Z6matMulPPiiiS0_ii # TAILCALL .Lfunc_end9: .size _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data, .Lfunc_end9-_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation .type _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation,@function _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation: # @_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation .cfi_startproc # %bb.0: cmpl $2, %edx je .LBB10_5 # %bb.1: cmpl $1, %edx je .LBB10_4 # %bb.2: testl %edx, %edx jne .LBB10_6 # %bb.3: movq $_ZTIZ4mainEUlvE_, (%rdi) xorl %eax, %eax retq .LBB10_4: movq %rsi, (%rdi) xorl %eax, %eax retq .LBB10_5: movups (%rsi), %xmm0 movups %xmm0, (%rdi) .LBB10_6: # %_ZNSt14_Function_base13_Base_managerIZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS3_St18_Manager_operation.exit xorl %eax, %eax retq .Lfunc_end10: .size _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation, .Lfunc_end10-_ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp 
.cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB11_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB11_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z15cudaMatMulClassPKiiiS0_iiPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10cudaMatMulPPiiiS0_iiS0_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end11: .size __hip_module_ctor, .Lfunc_end11-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB12_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB12_2: retq .Lfunc_end12: .size __hip_module_dtor, .Lfunc_end12-__hip_module_dtor .cfi_endproc # -- End function .type _Z15cudaMatMulClassPKiiiS0_iiPi,@object # @_Z15cudaMatMulClassPKiiiS0_iiPi .section .rodata,"a",@progbits .globl _Z15cudaMatMulClassPKiiiS0_iiPi .p2align 3, 0x0 _Z15cudaMatMulClassPKiiiS0_iiPi: .quad _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi .size _Z15cudaMatMulClassPKiiiS0_iiPi, 8 .type _Z10cudaMatMulPPiiiS0_iiS0_,@object # @_Z10cudaMatMulPPiiiS0_iiS0_ .globl _Z10cudaMatMulPPiiiS0_iiS0_ .p2align 3, 0x0 _Z10cudaMatMulPPiiiS0_iiS0_: .quad _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ .size _Z10cudaMatMulPPiiiS0_iiS0_, 8 .type .L.str,@object # @.str .section 
.rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "dimenstion don't fit" .size .L.str, 21 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "A = " .size .L.str.1, 5 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "," .size .L.str.2, 2 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "(" .size .L.str.3, 2 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz ")" .size .L.str.4, 2 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "gpu version" .size .L.str.5, 12 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "the new one " .size .L.str.6, 13 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "dimenstion don't fit " .size .L.str.7, 22 .type _ZTSZ4mainEUlvE_,@object # @_ZTSZ4mainEUlvE_ .section .rodata,"a",@progbits _ZTSZ4mainEUlvE_: .asciz "Z4mainEUlvE_" .size _ZTSZ4mainEUlvE_, 13 .type _ZTIZ4mainEUlvE_,@object # @_ZTIZ4mainEUlvE_ .p2align 3, 0x0 _ZTIZ4mainEUlvE_: .quad _ZTVN10__cxxabiv117__class_type_infoE+16 .quad _ZTSZ4mainEUlvE_ .size _ZTIZ4mainEUlvE_, 16 .type .L.str.8,@object # @.str.8 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.8: .asciz "execution took " .size .L.str.8, 16 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " milliseconds" .size .L.str.9, 14 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z15cudaMatMulClassPKiiiS0_iiPi" .size .L__unnamed_1, 32 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z10cudaMatMulPPiiiS0_iiS0_" .size .L__unnamed_2, 28 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z30__device_stub__cudaMatMulClassPKiiiS0_iiPi .addrsig_sym _Z25__device_stub__cudaMatMulPPiiiS0_iiS0_ .addrsig_sym __gxx_personality_v0 .addrsig_sym _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E9_M_invokeERKSt9_Any_data .addrsig_sym _ZNSt17_Function_handlerIFPvvEZ4mainEUlvE_E10_M_managerERSt9_Any_dataRKS4_St18_Manager_operation .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _Z15cudaMatMulClassPKiiiS0_iiPi .addrsig_sym _Z10cudaMatMulPPiiiS0_iiS0_ .addrsig_sym _ZSt4cout .addrsig_sym _ZTIi .addrsig_sym _ZTVN10__cxxabiv117__class_type_infoE .addrsig_sym _ZTSZ4mainEUlvE_ .addrsig_sym _ZTIZ4mainEUlvE_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int2 framebufferSize); __device__ __inline__ float dot(const float2 a, const float2 b) { return (a.x * b.x) + (a.y * b.y); } __device__ float2 calculatePosition(int x, int y, float width, float height) { float2 fragSize = make_float2(2 / width, 2 / height); return make_float2(fragSize.x * x + fragSize.y / 2 - 1, (fragSize.y * y + fragSize.y / 2 - 1) * -1); } __global__ void baryKernel(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int *width, int *height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < *width && y < *height) { float2 pos = calculatePosition(x, y, *width, *height); float2 t0 = make_float2(v2->x, v2->y); float2 t1 = make_float2(v0->x, v0->y); float2 t2 = make_float2(v1->x, v1->y); float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y); float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y); float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y); float d00 = dot(v0, v0); float d01 = dot(v0, v1); float d11 = dot(v1, v1); float d20 = dot(v2, v0); float d21 = dot(v2, v1); float denom = d00 * d11 - d01 * d01; float baryX = (d11 * d20 - d01 * d21) / denom; float baryY = (d00 * d21 - d01 * d20) / denom; float baryZ = 1 - baryX - baryY; if (baryX > 0 && baryY > 0 && baryZ > 0) { dOut[y * *width + x] = *da * baryX + *db * baryY + *dc * baryZ; } else { dOut[y * *width + x] = 0; } } } int main() { printf("\n\n\nBarycentric:\n"); int2 framebufferSize = make_int2(50, 50); float3 bary_v0 = make_float3(0, 1, 0); float3 bary_v1 = make_float3(1, -1, 0); float3 bary_v2 = make_float3(-1, -1, 0); float bary_da = 3; 
float bary_db = 2; float bary_dc = 1; float *bary_dOut = (float*)malloc(framebufferSize.x * framebufferSize.y * sizeof(float*)); // Barycentric in parallel. cudaError_t cudaStatus = barycentricCuda(&bary_v0, &bary_v1, &bary_v2, &bary_da, &bary_db, &bary_dc, bary_dOut, framebufferSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "barycentricCuda failed!"); return 1; } printf("{\n"); for (int y = 0; y < framebufferSize.y; y++) { printf(" {"); for (int x = 0; x < framebufferSize.x; x++) { printf("%.1f|", bary_dOut[x + y * framebufferSize.y]); } printf("}\n"); } printf("}\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } #define myMalloc(VAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE)); \ if (cudaStatus != cudaSuccess) {\ fprintf(stderr, "cudaMalloc failed!"); \ goto Error; \ } #define myVarOnGPU(VAR, SOURCEVAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE));\ if (cudaStatus != cudaSuccess) {\ fprintf(stderr, "cudaMalloc failed!");\ goto Error;\ }\ cudaStatus = cudaMemcpy(VAR, SOURCEVAR, SIZE * sizeof(TYPE), cudaMemcpyHostToDevice);\ if (cudaStatus != cudaSuccess) {\ fprintf(stderr, "cudaMemcpy failed!");\ goto Error;\ } // Helper function for using CUDA to add vectors in parallel. 
cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int2 framebufferSize) { int length = framebufferSize.x * framebufferSize.y; int bytes = length * sizeof(float); const dim3 windowSize(framebufferSize.x, framebufferSize.y); const dim3 blockSize(16, 16, 1); const dim3 gridSize(windowSize.x / blockSize.x + 1, windowSize.y / blockSize.y + 1); float3 *dev_v0 = 0; float3 *dev_v1 = 0; float3 *dev_v2 = 0; float *dev_da = 0; float *dev_db = 0; float *dev_dc = 0; struct cudaPitchedPtr dstGPU; int *dev_width = 0; int *dev_height = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate and populate GPU buffers for vectors and data. myVarOnGPU(dev_v0, v0, 1, float3); myVarOnGPU(dev_v1, v1, 1, float3); myVarOnGPU(dev_v2, v2, 1, float3); myVarOnGPU(dev_da, da, 1, float); myVarOnGPU(dev_db, db, 1, float); myVarOnGPU(dev_dc, dc, 1, float); myVarOnGPU(dev_width, &framebufferSize.x, 1, int); myVarOnGPU(dev_height, &framebufferSize.y, 1, int); cudaStatus = cudaMalloc3D(&dstGPU, make_cudaExtent(framebufferSize.x * sizeof(float), framebufferSize.y, 1)); // Launch a kernel on the GPU with one thread for each element. baryKernel <<<gridSize, blockSize>>> (dev_v0, dev_v1, dev_v2, dev_da, dev_db, dev_dc, (float *)dstGPU.ptr, dev_width, dev_height); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "barycentricCuda launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching barycentricCuda!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(dOut, dstGPU.ptr, bytes, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dstGPU.ptr); cudaFree(dev_dc); cudaFree(dev_db); cudaFree(dev_da); cudaFree(dev_v2); cudaFree(dev_v0); cudaFree(dev_v1); return cudaStatus; }
code for sm_80 Function : _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff047624 */ /* 0x000fe200078e00ff */ /*0020*/ MOV R5, c[0x0][0x19c] ; /* 0x0000670000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*0040*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ /*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0060*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e240000002100 */ /*0070*/ IMAD R3, R3, c[0x0][0x0], R2 ; /* 0x0000000003037a24 */ /* 0x001fc400078e0202 */ /*0080*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e260000002600 */ /*0090*/ ISETP.GE.U32.AND P0, PT, R3, R0, PT ; /* 0x000000000300720c */ /* 0x004fda0003f06070 */ /*00a0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00b0*/ MOV R5, c[0x0][0x1a4] ; /* 0x0000690000057a02 */ /* 0x001fe20000000f00 */ /*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x1a0] ; /* 0x00006800ff047624 */ /* 0x000fca00078e00ff */ /*00d0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*00e0*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */ /* 0x000e240000002200 */ /*00f0*/ IMAD R2, R2, c[0x0][0x4], R7 ; /* 0x0000010002027a24 */ /* 0x001fca00078e0207 */ /*0100*/ ISETP.GE.U32.AND P0, PT, R2, R5, PT ; /* 0x000000050200720c */ /* 0x004fda0003f06070 */ /*0110*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0120*/ I2F R6, R0 ; /* 0x0000000000067306 */ /* 0x000e220000201400 */ /*0130*/ UMOV UR6, 0x40000000 ; /* 0x4000000000067882 */ /* 0x000fe40000000000 */ /*0140*/ IMAD.U32 R9, RZ, RZ, UR6 ; /* 0x00000006ff097e24 */ /* 0x000fca000f8e00ff */ /*0150*/ MUFU.RCP R7, R6 ; /* 0x0000000600077308 */ /* 
0x001e300000001000 */ /*0160*/ FCHK P0, R9, R6 ; /* 0x0000000609007302 */ /* 0x000e620000000000 */ /*0170*/ FFMA R4, -R6, R7, 1 ; /* 0x3f80000006047423 */ /* 0x001fc80000000107 */ /*0180*/ FFMA R4, R7, R4, R7 ; /* 0x0000000407047223 */ /* 0x000fc80000000007 */ /*0190*/ FFMA R7, R4, 2, RZ ; /* 0x4000000004077823 */ /* 0x000fc800000000ff */ /*01a0*/ FFMA R8, -R6, R7, 2 ; /* 0x4000000006087423 */ /* 0x000fc80000000107 */ /*01b0*/ FFMA R4, R4, R8, R7 ; /* 0x0000000804047223 */ /* 0x000fe20000000007 */ /*01c0*/ @!P0 BRA 0x220 ; /* 0x0000005000008947 */ /* 0x002fea0003800000 */ /*01d0*/ MOV R7, R6 ; /* 0x0000000600077202 */ /* 0x000fe20000000f00 */ /*01e0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x40000000 ; /* 0x40000000ff0b7424 */ /* 0x000fe200078e00ff */ /*01f0*/ MOV R10, 0x210 ; /* 0x00000210000a7802 */ /* 0x000fe40000000f00 */ /*0200*/ CALL.REL.NOINC 0x8d0 ; /* 0x000006c000007944 */ /* 0x000fea0003c00000 */ /*0210*/ MOV R4, R9 ; /* 0x0000000900047202 */ /* 0x000fe40000000f00 */ /*0220*/ I2F R5, R5 ; /* 0x0000000500057306 */ /* 0x000e220000201400 */ /*0230*/ UMOV UR6, 0x40000000 ; /* 0x4000000000067882 */ /* 0x000fe40000000000 */ /*0240*/ IMAD.U32 R10, RZ, RZ, UR6 ; /* 0x00000006ff0a7e24 */ /* 0x000fca000f8e00ff */ /*0250*/ MUFU.RCP R6, R5 ; /* 0x0000000500067308 */ /* 0x001e300000001000 */ /*0260*/ FCHK P0, R10, R5 ; /* 0x000000050a007302 */ /* 0x000e620000000000 */ /*0270*/ FFMA R7, -R5, R6, 1 ; /* 0x3f80000005077423 */ /* 0x001fc80000000106 */ /*0280*/ FFMA R7, R6, R7, R6 ; /* 0x0000000706077223 */ /* 0x000fc80000000006 */ /*0290*/ FFMA R6, R7, 2, RZ ; /* 0x4000000007067823 */ /* 0x000fc800000000ff */ /*02a0*/ FFMA R8, -R5, R6, 2 ; /* 0x4000000005087423 */ /* 0x000fc80000000106 */ /*02b0*/ FFMA R7, R7, R8, R6 ; /* 0x0000000807077223 */ /* 0x000fe20000000006 */ /*02c0*/ @!P0 BRA 0x320 ; /* 0x0000005000008947 */ /* 0x002fea0003800000 */ /*02d0*/ MOV R7, R5 ; /* 0x0000000500077202 */ /* 0x000fe20000000f00 */ /*02e0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x40000000 ; /* 0x40000000ff0b7424 */ 
/* 0x000fe200078e00ff */ /*02f0*/ MOV R10, 0x310 ; /* 0x00000310000a7802 */ /* 0x000fe40000000f00 */ /*0300*/ CALL.REL.NOINC 0x8d0 ; /* 0x000005c000007944 */ /* 0x000fea0003c00000 */ /*0310*/ MOV R7, R9 ; /* 0x0000000900077202 */ /* 0x000fe40000000f00 */ /*0320*/ IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff107624 */ /* 0x000fe200078e00ff */ /*0330*/ MOV R17, c[0x0][0x174] ; /* 0x00005d0000117a02 */ /* 0x000fe20000000f00 */ /*0340*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff0e7624 */ /* 0x000fe200078e00ff */ /*0350*/ MOV R15, c[0x0][0x164] ; /* 0x00005900000f7a02 */ /* 0x000fe20000000f00 */ /*0360*/ IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff127624 */ /* 0x000fe200078e00ff */ /*0370*/ MOV R19, c[0x0][0x16c] ; /* 0x00005b0000137a02 */ /* 0x000fe20000000f00 */ /*0380*/ LDG.E R8, [R16.64+0x4] ; /* 0x0000040410087981 */ /* 0x000ea8000c1e1900 */ /*0390*/ LDG.E R5, [R14.64+0x4] ; /* 0x000004040e057981 */ /* 0x0000a8000c1e1900 */ /*03a0*/ LDG.E R13, [R18.64+0x4] ; /* 0x00000404120d7981 */ /* 0x000ee8000c1e1900 */ /*03b0*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */ /* 0x000328000c1e1900 */ /*03c0*/ LDG.E R6, [R14.64] ; /* 0x000000040e067981 */ /* 0x000128000c1e1900 */ /*03d0*/ LDG.E R12, [R18.64] ; /* 0x00000004120c7981 */ /* 0x000f62000c1e1900 */ /*03e0*/ I2F R20, R2 ; /* 0x0000000200147306 */ /* 0x000e620000201400 */ /*03f0*/ BSSY B0, 0x660 ; /* 0x0000026000007945 */ /* 0x000fe20003800000 */ /*0400*/ FMUL R14, R7, 0.5 ; /* 0x3f000000070e7820 */ /* 0x001fcc0000400000 */ /*0410*/ I2F R21, R3 ; /* 0x0000000300157306 */ /* 0x000e220000201400 */ /*0420*/ FFMA R20, R20, R7, R14.reuse ; /* 0x0000000714147223 */ /* 0x102fe4000000000e */ /*0430*/ FFMA R4, R21, R4, R14 ; /* 0x0000000415047223 */ /* 0x001fc8000000000e */ /*0440*/ FADD R4, R4, -1 ; /* 0xbf80000004047421 */ /* 0x000fe40000000000 */ /*0450*/ FADD R10, R5, -R8 ; /* 0x80000008050a7221 */ /* 0x004fe40000000000 */ /*0460*/ FADD R13, -R8, R13 ; /* 0x0000000d080d7221 */ /* 
0x008fc80000000100 */ /*0470*/ FMUL R5, R10, R13.reuse ; /* 0x0000000d0a057220 */ /* 0x080fe40000400000 */ /*0480*/ FMUL R17, R13, R13 ; /* 0x0000000d0d117220 */ /* 0x000fe40000400000 */ /*0490*/ FADD R11, R6, -R9 ; /* 0x80000009060b7221 */ /* 0x010fe40000000000 */ /*04a0*/ FMUL R6, R10, R10 ; /* 0x0000000a0a067220 */ /* 0x000fe40000400000 */ /*04b0*/ FADD R12, -R9, R12 ; /* 0x0000000c090c7221 */ /* 0x020fe40000000100 */ /*04c0*/ FFMA R6, R11, R11, R6 ; /* 0x0000000b0b067223 */ /* 0x000fc40000000006 */ /*04d0*/ FFMA R5, R11, R12.reuse, R5 ; /* 0x0000000c0b057223 */ /* 0x080fe40000000005 */ /*04e0*/ FFMA R15, R12, R12, R17 ; /* 0x0000000c0c0f7223 */ /* 0x000fe40000000011 */ /*04f0*/ FADD R17, R20, -1 ; /* 0xbf80000014117421 */ /* 0x000fe40000000000 */ /*0500*/ FMUL R16, R5, R5 ; /* 0x0000000505107220 */ /* 0x000fe40000400000 */ /*0510*/ FADD R17, -R17, -R8 ; /* 0x8000000811117221 */ /* 0x000fe40000000100 */ /*0520*/ FFMA R7, R6, R15, -R16 ; /* 0x0000000f06077223 */ /* 0x000fc40000000810 */ /*0530*/ FADD R4, R4, -R9 ; /* 0x8000000904047221 */ /* 0x000fe40000000000 */ /*0540*/ FMUL R13, R13, R17.reuse ; /* 0x000000110d0d7220 */ /* 0x080fe20000400000 */ /*0550*/ MUFU.RCP R14, R7 ; /* 0x00000007000e7308 */ /* 0x000e220000001000 */ /*0560*/ FMUL R8, R10, R17 ; /* 0x000000110a087220 */ /* 0x000fe40000400000 */ /*0570*/ FFMA R12, R12, R4.reuse, R13 ; /* 0x000000040c0c7223 */ /* 0x080fe4000000000d */ /*0580*/ FFMA R8, R11, R4, R8 ; /* 0x000000040b087223 */ /* 0x000fe40000000008 */ /*0590*/ FMUL R4, R5, R12 ; /* 0x0000000c05047220 */ /* 0x000fc80000400000 */ /*05a0*/ FFMA R11, R15, R8, -R4 ; /* 0x000000080f0b7223 */ /* 0x000fc80000000804 */ /*05b0*/ FCHK P0, R11, R7 ; /* 0x000000070b007302 */ /* 0x000e620000000000 */ /*05c0*/ FFMA R9, -R7, R14, 1 ; /* 0x3f80000007097423 */ /* 0x001fc8000000010e */ /*05d0*/ FFMA R9, R14, R9, R14 ; /* 0x000000090e097223 */ /* 0x000fc8000000000e */ /*05e0*/ FFMA R4, R11, R9, RZ ; /* 0x000000090b047223 */ /* 0x000fc800000000ff */ /*05f0*/ FFMA 
R10, -R7, R4, R11 ; /* 0x00000004070a7223 */ /* 0x000fc8000000010b */ /*0600*/ FFMA R4, R9, R10, R4 ; /* 0x0000000a09047223 */ /* 0x000fe20000000004 */ /*0610*/ @!P0 BRA 0x650 ; /* 0x0000003000008947 */ /* 0x002fea0003800000 */ /*0620*/ MOV R10, 0x640 ; /* 0x00000640000a7802 */ /* 0x000fe40000000f00 */ /*0630*/ CALL.REL.NOINC 0x8d0 ; /* 0x0000029000007944 */ /* 0x000fea0003c00000 */ /*0640*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0009 */ /*0650*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0660*/ MUFU.RCP R10, R7 ; /* 0x00000007000a7308 */ /* 0x000e220000001000 */ /*0670*/ FMUL R5, R5, R8 ; /* 0x0000000805057220 */ /* 0x000fe20000400000 */ /*0680*/ BSSY B0, 0x750 ; /* 0x000000c000007945 */ /* 0x000fe60003800000 */ /*0690*/ FFMA R11, R6, R12, -R5 ; /* 0x0000000c060b7223 */ /* 0x000fc80000000805 */ /*06a0*/ FCHK P0, R11, R7 ; /* 0x000000070b007302 */ /* 0x000e620000000000 */ /*06b0*/ FFMA R9, -R7, R10, 1 ; /* 0x3f80000007097423 */ /* 0x001fc8000000010a */ /*06c0*/ FFMA R10, R10, R9, R10 ; /* 0x000000090a0a7223 */ /* 0x000fc8000000000a */ /*06d0*/ FFMA R6, R10, R11, RZ ; /* 0x0000000b0a067223 */ /* 0x000fc800000000ff */ /*06e0*/ FFMA R5, -R7, R6, R11 ; /* 0x0000000607057223 */ /* 0x000fc8000000010b */ /*06f0*/ FFMA R5, R10, R5, R6 ; /* 0x000000050a057223 */ /* 0x000fe20000000006 */ /*0700*/ @!P0 BRA 0x740 ; /* 0x0000003000008947 */ /* 0x002fea0003800000 */ /*0710*/ MOV R10, 0x730 ; /* 0x00000730000a7802 */ /* 0x000fe40000000f00 */ /*0720*/ CALL.REL.NOINC 0x8d0 ; /* 0x000001a000007944 */ /* 0x000fea0003c00000 */ /*0730*/ MOV R5, R9 ; /* 0x0000000900057202 */ /* 0x000fe40000000f00 */ /*0740*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0750*/ FSETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720b */ /* 0x000fe20003f04000 */ /*0760*/ FADD R12, -R4, 1 ; /* 0x3f800000040c7421 */ /* 0x000fe20000000100 */ /*0770*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 
0x000fe200000001ff */ /*0780*/ IMAD R2, R2, R0, R3 ; /* 0x0000000002027224 */ /* 0x000fe200078e0203 */ /*0790*/ FSETP.GT.AND P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720b */ /* 0x000fc80000704000 */ /*07a0*/ FSETP.GT.AND P0, PT, R12, R5, P0 ; /* 0x000000050c00720b */ /* 0x000fc80000704000 */ /*07b0*/ IMAD.WIDE.U32 R2, R2, R7, c[0x0][0x190] ; /* 0x0000640002027625 */ /* 0x000fd200078e0007 */ /*07c0*/ @!P0 STG.E [R2.64], RZ ; /* 0x000000ff02008986 */ /* 0x0001e2000c101904 */ /*07d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*07e0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff087624 */ /* 0x000fe200078e00ff */ /*07f0*/ MOV R9, c[0x0][0x184] ; /* 0x0000610000097a02 */ /* 0x000fe20000000f00 */ /*0800*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */ /* 0x000fe200078e00ff */ /*0810*/ MOV R6, c[0x0][0x178] ; /* 0x00005e0000067a02 */ /* 0x000fc60000000f00 */ /*0820*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */ /* 0x000ea2000c1e1900 */ /*0830*/ MOV R11, c[0x0][0x18c] ; /* 0x00006300000b7a02 */ /* 0x000fe40000000f00 */ /*0840*/ MOV R10, c[0x0][0x188] ; /* 0x00006200000a7a02 */ /* 0x000fe20000000f00 */ /*0850*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ee8000c1e1900 */ /*0860*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */ /* 0x000f22000c1e1900 */ /*0870*/ FMUL R0, R8, R5 ; /* 0x0000000508007220 */ /* 0x004fe40000400000 */ /*0880*/ FADD R5, R12, -R5 ; /* 0x800000050c057221 */ /* 0x000fc40000000000 */ /*0890*/ FFMA R0, R7, R4, R0 ; /* 0x0000000407007223 */ /* 0x008fc80000000000 */ /*08a0*/ FFMA R5, R5, R11, R0 ; /* 0x0000000b05057223 */ /* 0x010fca0000000000 */ /*08b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*08c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*08d0*/ SHF.R.U32.HI R13, RZ, 0x17, R7 ; /* 0x00000017ff0d7819 */ /* 0x000fe20000011607 */ /*08e0*/ BSSY B1, 0xf20 ; /* 0x0000063000017945 */ /* 0x000fe20003800000 */ /*08f0*/ SHF.R.U32.HI R9, RZ, 0x17, R11 ; /* 
0x00000017ff097819 */ /* 0x000fe2000001160b */ /*0900*/ IMAD.MOV.U32 R14, RZ, RZ, R7 ; /* 0x000000ffff0e7224 */ /* 0x000fe200078e0007 */ /*0910*/ LOP3.LUT R13, R13, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff0d0d7812 */ /* 0x000fe400078ec0ff */ /*0920*/ LOP3.LUT R16, R9, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff09107812 */ /* 0x000fe400078ec0ff */ /*0930*/ IADD3 R17, R13, -0x1, RZ ; /* 0xffffffff0d117810 */ /* 0x000fe40007ffe0ff */ /*0940*/ IADD3 R15, R16, -0x1, RZ ; /* 0xffffffff100f7810 */ /* 0x000fc40007ffe0ff */ /*0950*/ ISETP.GT.U32.AND P0, PT, R17, 0xfd, PT ; /* 0x000000fd1100780c */ /* 0x000fc80003f04070 */ /*0960*/ ISETP.GT.U32.OR P0, PT, R15, 0xfd, P0 ; /* 0x000000fd0f00780c */ /* 0x000fda0000704470 */ /*0970*/ @!P0 MOV R9, RZ ; /* 0x000000ff00098202 */ /* 0x000fe20000000f00 */ /*0980*/ @!P0 BRA 0xb00 ; /* 0x0000017000008947 */ /* 0x000fea0003800000 */ /*0990*/ FSETP.GTU.FTZ.AND P0, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */ /* 0x000fe40003f1c200 */ /*09a0*/ FSETP.GTU.FTZ.AND P1, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */ /* 0x000fc80003f3c200 */ /*09b0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703570 */ /*09c0*/ @P0 BRA 0xf00 ; /* 0x0000053000000947 */ /* 0x000fea0003800000 */ /*09d0*/ LOP3.LUT P0, RZ, R14, 0x7fffffff, R11, 0xc8, !PT ; /* 0x7fffffff0eff7812 */ /* 0x000fda000780c80b */ /*09e0*/ @!P0 BRA 0xee0 ; /* 0x000004f000008947 */ /* 0x000fea0003800000 */ /*09f0*/ FSETP.NEU.FTZ.AND P2, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */ /* 0x000fe40003f5d200 */ /*0a00*/ FSETP.NEU.FTZ.AND P1, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */ /* 0x000fe40003f3d200 */ /*0a10*/ FSETP.NEU.FTZ.AND P0, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */ /* 0x000fd60003f1d200 */ /*0a20*/ @!P1 BRA !P2, 0xee0 ; /* 0x000004b000009947 */ /* 0x000fea0005000000 */ /*0a30*/ LOP3.LUT P2, RZ, R11, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0bff7812 */ /* 0x000fc8000784c0ff */ /*0a40*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 
0x000000000000781c */ /* 0x000fda0000f24572 */ /*0a50*/ @P1 BRA 0xec0 ; /* 0x0000046000001947 */ /* 0x000fea0003800000 */ /*0a60*/ LOP3.LUT P1, RZ, R14, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0eff7812 */ /* 0x000fc8000782c0ff */ /*0a70*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000702572 */ /*0a80*/ @P0 BRA 0xe90 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*0a90*/ ISETP.GE.AND P0, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */ /* 0x000fe40003f06270 */ /*0aa0*/ ISETP.GE.AND P1, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fd60003f26270 */ /*0ab0*/ @P0 MOV R9, RZ ; /* 0x000000ff00090202 */ /* 0x000fe20000000f00 */ /*0ac0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, -0x40 ; /* 0xffffffc0ff098424 */ /* 0x000fe400078e00ff */ /*0ad0*/ @!P0 FFMA R11, R11, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000b0b8823 */ /* 0x000fe400000000ff */ /*0ae0*/ @!P1 FFMA R14, R7, 1.84467440737095516160e+19, RZ ; /* 0x5f800000070e9823 */ /* 0x000fe200000000ff */ /*0af0*/ @!P1 IADD3 R9, R9, 0x40, RZ ; /* 0x0000004009099810 */ /* 0x000fe40007ffe0ff */ /*0b00*/ LEA R15, R13, 0xc0800000, 0x17 ; /* 0xc08000000d0f7811 */ /* 0x000fe200078eb8ff */ /*0b10*/ BSSY B2, 0xe80 ; /* 0x0000036000027945 */ /* 0x000fe20003800000 */ /*0b20*/ IADD3 R16, R16, -0x7f, RZ ; /* 0xffffff8110107810 */ /* 0x000fe40007ffe0ff */ /*0b30*/ IADD3 R17, -R15, R14, RZ ; /* 0x0000000e0f117210 */ /* 0x000fc60007ffe1ff */ /*0b40*/ IMAD R11, R16.reuse, -0x800000, R11 ; /* 0xff800000100b7824 */ /* 0x040fe200078e020b */ /*0b50*/ MUFU.RCP R14, R17 ; /* 0x00000011000e7308 */ /* 0x000e220000001000 */ /*0b60*/ FADD.FTZ R18, -R17, -RZ ; /* 0x800000ff11127221 */ /* 0x000fe20000010100 */ /*0b70*/ IADD3 R16, R16, 0x7f, -R13 ; /* 0x0000007f10107810 */ /* 0x000fc80007ffe80d */ /*0b80*/ IADD3 R16, R16, R9, RZ ; /* 0x0000000910107210 */ /* 0x000fe20007ffe0ff */ /*0b90*/ FFMA R15, R14, R18, 1 ; /* 0x3f8000000e0f7423 */ /* 0x001fc80000000012 */ /*0ba0*/ FFMA R14, R14, R15, R14 ; /* 0x0000000f0e0e7223 */ /* 
0x000fc8000000000e */ /*0bb0*/ FFMA R15, R11, R14, RZ ; /* 0x0000000e0b0f7223 */ /* 0x000fc800000000ff */ /*0bc0*/ FFMA R19, R18, R15, R11 ; /* 0x0000000f12137223 */ /* 0x000fc8000000000b */ /*0bd0*/ FFMA R15, R14, R19, R15 ; /* 0x000000130e0f7223 */ /* 0x000fc8000000000f */ /*0be0*/ FFMA R18, R18, R15, R11 ; /* 0x0000000f12127223 */ /* 0x000fc8000000000b */ /*0bf0*/ FFMA R11, R14, R18, R15 ; /* 0x000000120e0b7223 */ /* 0x000fca000000000f */ /*0c00*/ SHF.R.U32.HI R13, RZ, 0x17, R11 ; /* 0x00000017ff0d7819 */ /* 0x000fc8000001160b */ /*0c10*/ LOP3.LUT R13, R13, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff0d0d7812 */ /* 0x000fca00078ec0ff */ /*0c20*/ IMAD.IADD R17, R13, 0x1, R16 ; /* 0x000000010d117824 */ /* 0x000fca00078e0210 */ /*0c30*/ IADD3 R9, R17, -0x1, RZ ; /* 0xffffffff11097810 */ /* 0x000fc80007ffe0ff */ /*0c40*/ ISETP.GE.U32.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */ /* 0x000fda0003f06070 */ /*0c50*/ @!P0 BRA 0xe60 ; /* 0x0000020000008947 */ /* 0x000fea0003800000 */ /*0c60*/ ISETP.GT.AND P0, PT, R17, 0xfe, PT ; /* 0x000000fe1100780c */ /* 0x000fda0003f04270 */ /*0c70*/ @P0 BRA 0xe30 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*0c80*/ ISETP.GE.AND P0, PT, R17, 0x1, PT ; /* 0x000000011100780c */ /* 0x000fda0003f06270 */ /*0c90*/ @P0 BRA 0xe70 ; /* 0x000001d000000947 */ /* 0x000fea0003800000 */ /*0ca0*/ ISETP.GE.AND P0, PT, R17, -0x18, PT ; /* 0xffffffe81100780c */ /* 0x000fe40003f06270 */ /*0cb0*/ LOP3.LUT R11, R11, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000b0b7812 */ /* 0x000fd600078ec0ff */ /*0cc0*/ @!P0 BRA 0xe70 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0cd0*/ FFMA.RZ R9, R14.reuse, R18.reuse, R15.reuse ; /* 0x000000120e097223 */ /* 0x1c0fe2000000c00f */ /*0ce0*/ IADD3 R16, R17.reuse, 0x20, RZ ; /* 0x0000002011107810 */ /* 0x040fe40007ffe0ff */ /*0cf0*/ ISETP.NE.AND P2, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fe40003f45270 */ /*0d00*/ LOP3.LUT R13, R9, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff090d7812 */ /* 0x000fe200078ec0ff 
*/ /*0d10*/ FFMA.RP R9, R14.reuse, R18.reuse, R15.reuse ; /* 0x000000120e097223 */ /* 0x1c0fe2000000800f */ /*0d20*/ ISETP.NE.AND P1, PT, R17, RZ, PT ; /* 0x000000ff1100720c */ /* 0x000fe20003f25270 */ /*0d30*/ FFMA.RM R14, R14, R18, R15 ; /* 0x000000120e0e7223 */ /* 0x000fe2000000400f */ /*0d40*/ LOP3.LUT R13, R13, 0x800000, RZ, 0xfc, !PT ; /* 0x008000000d0d7812 */ /* 0x000fe400078efcff */ /*0d50*/ IADD3 R15, -R17, RZ, RZ ; /* 0x000000ff110f7210 */ /* 0x000fc40007ffe1ff */ /*0d60*/ SHF.L.U32 R16, R13, R16, RZ ; /* 0x000000100d107219 */ /* 0x000fe400000006ff */ /*0d70*/ FSETP.NEU.FTZ.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720b */ /* 0x000fe40003f1d000 */ /*0d80*/ SEL R14, R15, RZ, P2 ; /* 0x000000ff0f0e7207 */ /* 0x000fe40001000000 */ /*0d90*/ ISETP.NE.AND P1, PT, R16, RZ, P1 ; /* 0x000000ff1000720c */ /* 0x000fe40000f25270 */ /*0da0*/ SHF.R.U32.HI R14, RZ, R14, R13 ; /* 0x0000000eff0e7219 */ /* 0x000fe4000001160d */ /*0db0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40000703570 */ /*0dc0*/ SHF.R.U32.HI R16, RZ, 0x1, R14 ; /* 0x00000001ff107819 */ /* 0x000fe4000001160e */ /*0dd0*/ SEL R9, RZ, 0x1, !P0 ; /* 0x00000001ff097807 */ /* 0x000fc80004000000 */ /*0de0*/ LOP3.LUT R9, R9, 0x1, R16, 0xf8, !PT ; /* 0x0000000109097812 */ /* 0x000fc800078ef810 */ /*0df0*/ LOP3.LUT R9, R9, R14, RZ, 0xc0, !PT ; /* 0x0000000e09097212 */ /* 0x000fc800078ec0ff */ /*0e00*/ IADD3 R16, R16, R9, RZ ; /* 0x0000000910107210 */ /* 0x000fc80007ffe0ff */ /*0e10*/ LOP3.LUT R11, R16, R11, RZ, 0xfc, !PT ; /* 0x0000000b100b7212 */ /* 0x000fe200078efcff */ /*0e20*/ BRA 0xe70 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*0e30*/ LOP3.LUT R11, R11, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000b0b7812 */ /* 0x000fc800078ec0ff */ /*0e40*/ LOP3.LUT R11, R11, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f8000000b0b7812 */ /* 0x000fe200078efcff */ /*0e50*/ BRA 0xe70 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0e60*/ IMAD R11, R16, 0x800000, R11 ; /* 
0x00800000100b7824 */ /* 0x000fe400078e020b */ /*0e70*/ BSYNC B2 ; /* 0x0000000000027941 */ /* 0x000fea0003800000 */ /*0e80*/ BRA 0xf10 ; /* 0x0000008000007947 */ /* 0x000fea0003800000 */ /*0e90*/ LOP3.LUT R11, R14, 0x80000000, R11, 0x48, !PT ; /* 0x800000000e0b7812 */ /* 0x000fc800078e480b */ /*0ea0*/ LOP3.LUT R11, R11, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f8000000b0b7812 */ /* 0x000fe200078efcff */ /*0eb0*/ BRA 0xf10 ; /* 0x0000005000007947 */ /* 0x000fea0003800000 */ /*0ec0*/ LOP3.LUT R11, R14, 0x80000000, R11, 0x48, !PT ; /* 0x800000000e0b7812 */ /* 0x000fe200078e480b */ /*0ed0*/ BRA 0xf10 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*0ee0*/ MUFU.RSQ R11, -QNAN ; /* 0xffc00000000b7908 */ /* 0x000e220000001400 */ /*0ef0*/ BRA 0xf10 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0f00*/ FADD.FTZ R11, R11, R7 ; /* 0x000000070b0b7221 */ /* 0x000fe40000010000 */ /*0f10*/ BSYNC B1 ; /* 0x0000000000017941 */ /* 0x000fea0003800000 */ /*0f20*/ MOV R9, R11 ; /* 0x0000000b00097202 */ /* 0x001fe20000000f00 */ /*0f30*/ HFMA2.MMA R11, -RZ, RZ, 0, 0 ; /* 0x00000000ff0b7435 */ /* 0x000fcc00000001ff */ /*0f40*/ RET.REL.NODEC R10 0x0 ; /* 0xfffff0b00a007950 */ /* 0x000fea0003c3ffff */ /*0f50*/ BRA 0xf50; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0f60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f80*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0f90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0fa0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0fb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0fc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0fd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0fe0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ff0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2,
    const float *da, const float *db, const float *dc,
    float *dOut, int2 framebufferSize);

// 2D dot product.
__device__ __inline__ float dot(const float2 a, const float2 b)
{
    return (a.x * b.x) + (a.y * b.y);
}

// Map integer pixel coordinates to [-1, 1] clip-style coordinates, sampling at
// the pixel center. The y axis is flipped so +y points up.
__device__ float2 calculatePosition(int x, int y, float width, float height)
{
    float2 fragSize = make_float2(2 / width, 2 / height);
    // Fixed: the x component previously used fragSize.y for its half-pixel
    // offset (copy-paste); identical for square framebuffers, wrong otherwise.
    return make_float2(fragSize.x * x + fragSize.x / 2 - 1,
                       (fragSize.y * y + fragSize.y / 2 - 1) * -1);
}

// One thread per pixel (2D launch). Writes the interpolated value da/db/dc
// weighted by the barycentric coordinates of the pixel w.r.t. triangle
// (v0, v1, v2), or 0 for pixels outside the triangle.
__global__ void baryKernel(const float3 *v0, const float3 *v1, const float3 *v2,
    const float *da, const float *db, const float *dc,
    float *dOut, int *width, int *height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < *width && y < *height) {
        float2 pos = calculatePosition(x, y, *width, *height);

        float2 t0 = make_float2(v2->x, v2->y);
        float2 t1 = make_float2(v0->x, v0->y);
        float2 t2 = make_float2(v1->x, v1->y);

        // Edge vectors; renamed from v0/v1/v2, which shadowed the parameters.
        float2 e0 = make_float2(t1.x - t0.x, t1.y - t0.y);
        float2 e1 = make_float2(t2.x - t0.x, t2.y - t0.y);
        float2 e2 = make_float2(pos.x - t0.x, pos.y - t0.y);

        // Standard barycentric solve via dot products (Cramer's rule).
        float d00 = dot(e0, e0);
        float d01 = dot(e0, e1);
        float d11 = dot(e1, e1);
        float d20 = dot(e2, e0);
        float d21 = dot(e2, e1);
        float denom = d00 * d11 - d01 * d01;

        float baryX = (d11 * d20 - d01 * d21) / denom;
        float baryY = (d00 * d21 - d01 * d20) / denom;
        float baryZ = 1 - baryX - baryY;

        if (baryX > 0 && baryY > 0 && baryZ > 0) {
            dOut[y * *width + x] = *da * baryX + *db * baryY + *dc * baryZ;
        }
        else {
            dOut[y * *width + x] = 0;
        }
    }
}

int main()
{
    printf("\n\n\nBarycentric:\n");

    int2 framebufferSize = make_int2(50, 50);

    // Triangle vertices and the per-vertex values to interpolate.
    float3 bary_v0 = make_float3(0, 1, 0);
    float3 bary_v1 = make_float3(1, -1, 0);
    float3 bary_v2 = make_float3(-1, -1, 0);
    float bary_da = 3;
    float bary_db = 2;
    float bary_dc = 1;

    // Fixed: was sizeof(float*), which over-allocated; one float per pixel.
    float *bary_dOut = (float*)malloc(framebufferSize.x * framebufferSize.y * sizeof(float));
    if (bary_dOut == NULL) {
        fprintf(stderr, "malloc failed!");
        return 1;
    }

    // Barycentric in parallel.
    cudaError_t cudaStatus = barycentricCuda(&bary_v0, &bary_v1, &bary_v2, &bary_da, &bary_db, &bary_dc, bary_dOut, framebufferSize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "barycentricCuda failed!");
        free(bary_dOut);
        return 1;
    }

    printf("{\n");
    for (int y = 0; y < framebufferSize.y; y++) {
        printf(" {");
        for (int x = 0; x < framebufferSize.x; x++) {
            // Fixed: row stride is the framebuffer width, not its height.
            printf("%.1f|", bary_dOut[x + y * framebufferSize.x]);
        }
        printf("}\n");
    }
    printf("}\n");

    free(bary_dOut);  // fixed: was leaked

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Allocate SIZE elements of TYPE on the device into VAR; jumps to Error on failure.
#define myMalloc(VAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE)); \
    if (cudaStatus != cudaSuccess) {\
        fprintf(stderr, "cudaMalloc failed!"); \
        goto Error; \
    }

// Allocate SIZE elements of TYPE on the device into VAR and copy them from
// host pointer SOURCEVAR; jumps to Error on failure.
#define myVarOnGPU(VAR, SOURCEVAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE));\
    if (cudaStatus != cudaSuccess) {\
        fprintf(stderr, "cudaMalloc failed!");\
        goto Error;\
    }\
    cudaStatus = cudaMemcpy(VAR, SOURCEVAR, SIZE * sizeof(TYPE), cudaMemcpyHostToDevice);\
    if (cudaStatus != cudaSuccess) {\
        fprintf(stderr, "cudaMemcpy failed!");\
        goto Error;\
    }

// Helper that runs the barycentric kernel on the GPU and copies the result back.
// Rasterizes triangle (v0, v1, v2) into a framebufferSize.x x framebufferSize.y
// grid, interpolating the scalar values da/db/dc barycentrically, and writes the
// result into the host buffer dOut (framebufferSize.x * framebufferSize.y floats).
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2,
    const float *da, const float *db, const float *dc,
    float *dOut, int2 framebufferSize)
{
    int length = framebufferSize.x * framebufferSize.y;
    int bytes = length * sizeof(float);

    // +1 block in each dimension covers sizes not divisible by 16; the kernel
    // bounds-checks the overhang.
    const dim3 windowSize(framebufferSize.x, framebufferSize.y);
    const dim3 blockSize(16, 16, 1);
    const dim3 gridSize(windowSize.x / blockSize.x + 1, windowSize.y / blockSize.y + 1);

    float3 *dev_v0 = 0;
    float3 *dev_v1 = 0;
    float3 *dev_v2 = 0;
    float *dev_da = 0;
    float *dev_db = 0;
    float *dev_dc = 0;
    // Zero-initialized so an early goto Error does not cudaFree a garbage pointer.
    struct cudaPitchedPtr dstGPU = {};
    int *dev_width = 0;
    int *dev_height = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate and populate GPU buffers for vectors and data.
    myVarOnGPU(dev_v0, v0, 1, float3);
    myVarOnGPU(dev_v1, v1, 1, float3);
    myVarOnGPU(dev_v2, v2, 1, float3);
    myVarOnGPU(dev_da, da, 1, float);
    myVarOnGPU(dev_db, db, 1, float);
    myVarOnGPU(dev_dc, dc, 1, float);
    myVarOnGPU(dev_width, &framebufferSize.x, 1, int);
    myVarOnGPU(dev_height, &framebufferSize.y, 1, int);

    // NOTE(review): the kernel indexes this pitched allocation linearly
    // (y * width + x), ignoring dstGPU.pitch, and the linear cudaMemcpy below
    // matches that layout — the rows' padding is simply wasted. A plain
    // cudaMalloc would suffice; switching to pitched indexing would require a
    // coordinated kernel change.
    cudaStatus = cudaMalloc3D(&dstGPU, make_cudaExtent(framebufferSize.x * sizeof(float), framebufferSize.y, 1));
    if (cudaStatus != cudaSuccess) {  // fixed: status was previously ignored
        fprintf(stderr, "cudaMalloc3D failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    baryKernel <<<gridSize, blockSize>>> (dev_v0, dev_v1, dev_v2, dev_da, dev_db, dev_dc, (float *)dstGPU.ptr, dev_width, dev_height);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "barycentricCuda launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching barycentricCuda!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(dOut, dstGPU.ptr, bytes, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    // cudaFree(NULL) is a no-op, so freeing never-allocated pointers is safe.
    cudaFree(dstGPU.ptr);
    cudaFree(dev_height);  // fixed: was leaked
    cudaFree(dev_width);   // fixed: was leaked
    cudaFree(dev_dc);
    cudaFree(dev_db);
    cudaFree(dev_da);
    cudaFree(dev_v2);
    cudaFree(dev_v1);
    cudaFree(dev_v0);

    return cudaStatus;
}
.file "tmpxft_001270e4_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z17calculatePositioniiff .type _Z17calculatePositioniiff, @function _Z17calculatePositioniiff: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z17calculatePositioniiff, .-_Z17calculatePositioniiff .globl _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_ .type _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_, @function _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_: .LFB2085: .cfi_startproc endbr64 subq $232, %rsp .cfi_def_cfa_offset 240 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) movq %r8, 40(%rsp) movq %r9, 32(%rsp) movq 240(%rsp), %rax movq %rax, 24(%rsp) movq 248(%rsp), %rax movq %rax, 16(%rsp) movq 256(%rsp), %rax movq %rax, 8(%rsp) movq %fs:40, %rax movq %rax, 216(%rsp) xorl %eax, %eax leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rax movq %rax, 160(%rsp) leaq 48(%rsp), %rax movq %rax, 168(%rsp) leaq 40(%rsp), %rax movq %rax, 176(%rsp) leaq 32(%rsp), %rax movq %rax, 184(%rsp) leaq 24(%rsp), %rax movq %rax, 192(%rsp) leaq 16(%rsp), %rax movq %rax, 200(%rsp) leaq 8(%rsp), %rax movq %rax, 208(%rsp) movl $1, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl $1, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) leaq 88(%rsp), %rcx 
leaq 80(%rsp), %rdx leaq 108(%rsp), %rsi leaq 96(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 216(%rsp), %rax subq %fs:40, %rax jne .L10 addq $232, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 88(%rsp) .cfi_def_cfa_offset 248 pushq 88(%rsp) .cfi_def_cfa_offset 256 leaq 160(%rsp), %r9 movq 124(%rsp), %rcx movl 132(%rsp), %r8d movq 112(%rsp), %rsi movl 120(%rsp), %edx leaq _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 240 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_, .-_Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_ .globl _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_ .type _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_, @function _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_: .LFB2086: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 pushq 40(%rsp) .cfi_def_cfa_offset 32 pushq 40(%rsp) .cfi_def_cfa_offset 40 pushq 40(%rsp) .cfi_def_cfa_offset 48 call _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_ addq $40, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_, .-_Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "cudaMalloc failed!" .LC2: .string "cudaMemcpy failed!" 
.section .rodata.str1.8 .align 8 .LC3: .string "barycentricCuda launch failed: %s\n" .align 8 .LC4: .string "cudaDeviceSynchronize returned error code %d after launching barycentricCuda!\n" .text .globl _Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2 .type _Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2, @function _Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $216, %rsp .cfi_def_cfa_offset 272 movq %rdi, %rbp movq %rsi, %r12 movq %rdx, (%rsp) movq %rcx, 8(%rsp) movq %r8, 16(%rsp) movq %r9, 24(%rsp) movq 272(%rsp), %r15 movq %fs:40, %rax movq %rax, 200(%rsp) xorl %eax, %eax movl 280(%rsp), %r13d movl 284(%rsp), %r14d movl $1, 112(%rsp) movl %r13d, %eax shrl $4, %eax addl $1, %eax movl %eax, 116(%rsp) movl %r14d, %eax shrl $4, %eax addl $1, %eax movl %eax, 120(%rsp) movl $1, 124(%rsp) movq $0, 40(%rsp) movq $0, 48(%rsp) movq $0, 56(%rsp) movq $0, 64(%rsp) movq $0, 72(%rsp) movq $0, 80(%rsp) movq $0, 88(%rsp) movq $0, 96(%rsp) movl $0, %edi call cudaSetDevice@PLT testl %eax, %eax jne .L37 leaq 40(%rsp), %rdi movl $12, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L38 movl $1, %ecx movl $12, %edx movq %rbp, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L39 leaq 48(%rsp), %rdi movl $12, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L40 movl $1, %ecx movl $12, %edx movq %r12, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L41 leaq 56(%rsp), %rdi movl $12, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L42 movl $1, %ecx movl $12, %edx movq (%rsp), %rsi movq 56(%rsp), %rdi call 
cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L43 leaq 64(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L44 movl $1, %ecx movl $4, %edx movq 8(%rsp), %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L45 leaq 72(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L46 movl $1, %ecx movl $4, %edx movq 16(%rsp), %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L47 leaq 80(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L48 movl $1, %ecx movl $4, %edx movq 24(%rsp), %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L49 leaq 88(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L50 leaq 280(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 88(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L51 leaq 96(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl %eax, %ebx testl %eax, %eax jne .L52 leaq 284(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 96(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L53 movslq 284(%rsp), %rdx movslq 280(%rsp), %rax salq $2, %rax movq %rax, 128(%rsp) movq %rdx, 136(%rsp) leaq 160(%rsp), %rdi subq $32, %rsp .cfi_def_cfa_offset 304 movdqa 160(%rsp), %xmm0 movups %xmm0, (%rsp) movq $1, 16(%rsp) call cudaMalloc3D@PLT movl $16, 136(%rsp) movl $16, 140(%rsp) addq $32, %rsp .cfi_def_cfa_offset 272 movl 112(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 104(%rsp), %rdx movq 116(%rsp), %rdi movl 124(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L54 .L32: call cudaGetLastError@PLT movl %eax, %ebx testl %eax, %eax jne .L55 call cudaDeviceSynchronize@PLT movl %eax, %ebx testl %eax, %eax jne .L56 movl %r13d, %edx imull %r14d, %edx sall $2, %edx movslq %edx, %rdx movl $2, %ecx movq 160(%rsp), %rsi movq %r15, %rdi call cudaMemcpy@PLT movl %eax, %ebx testl 
%eax, %eax je .L15 leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L37: movl %eax, %ebx leaq .LC0(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT .L15: movq 160(%rsp), %rdi call cudaFree@PLT movq 80(%rsp), %rdi call cudaFree@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT movq 200(%rsp), %rax subq %fs:40, %rax jne .L57 movl %ebx, %eax addq $216, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L39: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L40: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L41: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L42: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L43: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L44: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L45: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L46: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L47: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L48: leaq 
.LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L49: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L50: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L51: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L52: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L53: leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L54: subq $8, %rsp .cfi_def_cfa_offset 280 pushq 104(%rsp) .cfi_def_cfa_offset 288 pushq 104(%rsp) .cfi_def_cfa_offset 296 pushq 184(%rsp) .cfi_def_cfa_offset 304 movq 112(%rsp), %r9 movq 104(%rsp), %r8 movq 96(%rsp), %rcx movq 88(%rsp), %rdx movq 80(%rsp), %rsi movq 72(%rsp), %rdi call _Z59__device_stub__Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_PK6float3S1_S1_PKfS3_S3_PfPiS5_ addq $32, %rsp .cfi_def_cfa_offset 272 jmp .L32 .L55: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L56: movl %eax, %ecx leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L15 .L57: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size _Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2, .-_Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2 .section .rodata.str1.1 .LC5: .string "\n\n\nBarycentric:\n" .LC11: .string "barycentricCuda failed!" .LC12: .string "{\n" .LC13: .string " {" .LC14: .string "%.1f|" .LC15: .string "}\n" .LC16: .string "cudaDeviceReset failed!" 
.text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax leaq .LC5(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $0x00000000, 20(%rsp) movss .LC7(%rip), %xmm0 movss %xmm0, 24(%rsp) movl $0x00000000, 28(%rsp) movss %xmm0, 32(%rsp) movss .LC8(%rip), %xmm1 movss %xmm1, 36(%rsp) movl $0x00000000, 40(%rsp) movss %xmm1, 44(%rsp) movss %xmm1, 48(%rsp) movl $0x00000000, 52(%rsp) movl $0x40400000, 8(%rsp) movl $0x40000000, 12(%rsp) movss %xmm0, 16(%rsp) movl $20000, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rcx leaq 44(%rsp), %rdx leaq 32(%rsp), %rsi leaq 20(%rsp), %rdi pushq $50 .cfi_def_cfa_offset 136 movl $50, 4(%rsp) pushq %rax .cfi_def_cfa_offset 144 leaq 32(%rsp), %r9 leaq 28(%rsp), %r8 call _Z15barycentricCudaPK6float3S1_S1_PKfS3_S3_Pf4int2 addq $16, %rsp .cfi_def_cfa_offset 128 testl %eax, %eax jne .L68 leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 200(%r13), %rbp addq $10200, %r13 leaq .LC13(%rip), %r15 leaq .LC14(%rip), %r12 leaq .LC15(%rip), %r14 .L62: movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -200(%rbp), %rbx .L61: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %r12, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L61 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $200, %rbp cmpq %r13, %rbp jne .L62 leaq .LC15(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaDeviceReset@PLT movl %eax, %edx movl $0, %eax testl %edx, %edx jne .L69 .L58: movq 56(%rsp), %rdx subq %fs:40, %rdx 
jne .L70 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L68: .cfi_restore_state leaq .LC11(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %eax jmp .L58 .L69: leaq .LC16(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi call __fprintf_chk@PLT movl $1, %eax jmp .L58 .L70: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.8 .align 8 .LC17: .string "_Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z10baryKernelPK6float3S1_S1_PKfS3_S3_PfPiS5_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC7: .long 1065353216 .align 4 .LC8: 
.long -1082130432 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4: