system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00149573_00000000-6_vector_add_1D.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z6arraddPiS_S_PiS_S_ .type _Z29__device_stub__Z6arraddPiS_S_PiS_S_, @function _Z29__device_stub__Z6arraddPiS_S_PiS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6arraddPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z29__device_stub__Z6arraddPiS_S_PiS_S_, .-_Z29__device_stub__Z6arraddPiS_S_PiS_S_ .globl _Z6arraddPiS_S_ .type _Z6arraddPiS_S_, @function _Z6arraddPiS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z6arraddPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z6arraddPiS_S_, 
.-_Z6arraddPiS_S_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\t%d" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $2464, %rsp .cfi_def_cfa_offset 2496 movq %fs:40, %rax movq %rax, 2456(%rsp) xorl %eax, %eax .L12: movl %eax, 48(%rsp,%rax,4) movl %eax, 848(%rsp,%rax,4) movl $0, 1648(%rsp,%rax,4) addq $1, %rax cmpq $200, %rax jne .L12 movq %rsp, %rdi movl $800, %esi call cudaMalloc@PLT leaq 48(%rsp), %rsi movl $1, %ecx movl $800, %edx movq (%rsp), %rdi call cudaMemcpy@PLT leaq 8(%rsp), %rdi movl $800, %esi call cudaMalloc@PLT leaq 848(%rsp), %rsi movl $1, %ecx movl $800, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT leaq 16(%rsp), %rdi movl $800, %esi call cudaMalloc@PLT movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $200, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movl $1, %ecx movq 24(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L19 .L13: leaq 1648(%rsp), %rbx movl $2, %ecx movl $800, %edx movq 16(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT leaq 2448(%rsp), %r12 leaq .LC0(%rip), %rbp .L14: movl (%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L14 movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 2456(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $2464, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z29__device_stub__Z6arraddPiS_S_PiS_S_ jmp .L13 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section 
.rodata.str1.1 .LC1: .string "_Z6arraddPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z6arraddPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "vector_add_1D.hip" .globl _Z21__device_stub__arraddPiS_S_ # -- Begin function _Z21__device_stub__arraddPiS_S_ .p2align 4, 0x90 .type _Z21__device_stub__arraddPiS_S_,@function _Z21__device_stub__arraddPiS_S_: # @_Z21__device_stub__arraddPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6arraddPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z21__device_stub__arraddPiS_S_, .Lfunc_end0-_Z21__device_stub__arraddPiS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $2528, %rsp # imm = 0x9E0 .cfi_def_cfa_offset 2544 .cfi_offset %rbx, -16 leaq 128(%rsp), %rdi xorl %ebx, %ebx movl $800, %edx # imm = 0x320 xorl %esi, %esi callq memset@PLT .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movl %ebx, 1728(%rsp,%rbx,4) movl %ebx, 928(%rsp,%rbx,4) incq %rbx cmpq $200, %rbx jne .LBB1_1 # %bb.2: leaq 16(%rsp), %rdi movl $800, %esi # imm = 0x320 callq hipMalloc movq 16(%rsp), %rdi leaq 1728(%rsp), %rsi movl $800, %edx # imm = 0x320 movl $1, %ecx callq hipMemcpy leaq 8(%rsp), %rdi movl $800, %esi # imm = 0x320 callq hipMalloc movq 8(%rsp), %rdi leaq 928(%rsp), %rsi movl $800, %edx # imm = 0x320 movl $1, %ecx callq hipMemcpy movq %rsp, %rdi movl $800, %esi # imm = 0x320 callq hipMalloc movabsq $4294967297, %rdi # imm = 0x100000001 leaq 199(%rdi), %rdx movl $1, %esi movl $1, %ecx 
xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z6arraddPiS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq (%rsp), %rsi leaq 128(%rsp), %rdi movl $800, %edx # imm = 0x320 movl $2, %ecx callq hipMemcpy xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movl 128(%rsp,%rbx,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %rbx cmpq $200, %rbx jne .LBB1_5 # %bb.6: movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree xorl %eax, %eax addq $2528, %rsp # imm = 0x9E0 .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6arraddPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL 
.Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6arraddPiS_S_,@object # @_Z6arraddPiS_S_ .section .rodata,"a",@progbits .globl _Z6arraddPiS_S_ .p2align 3, 0x0 _Z6arraddPiS_S_: .quad _Z21__device_stub__arraddPiS_S_ .size _Z6arraddPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\t%d" .size .L.str, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6arraddPiS_S_" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__arraddPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6arraddPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include<stdio.h> //#include "cutil.h" #define FADD(a,b) __fadd_rn(a,b) #define sof sizeof(float) #ifdef __DEVICE_EMULATION__ #define EMUSYNC __syncthreads() #else #define EMUSYNC #endif /////////////////////////////////////////////////////////////////////// /** parallel reduction Harris 07 This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) AQ: this routine should be called with dim3 dimBlock(threads, 1, 1); dim3 dimGrid(numBlocks,1,1); but numBlocks = n/(2*threads) this routine meant to be run with numBlocks smaller than this by various factors of 2 int smemSize=threads*sizeof(float); case 512: reduce6<512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata,size); break: case 256: reduce6<256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata,size); */ /////////////////////////////////////////////////////////////////////// /// g_idata input vector on device length n /// g_odata output vector on device length numBlocks /// n length of vector /// reduction kernel template <unsigned int blockSize> // number of threads __global__ void reduce6K(float* g_idata, float* g_odata, unsigned int n) { extern volatile __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; /// we reduce multiple elements per thread. The number is determined by the /// number of active thread blocks (via gridSize). 
More blocks will result /// in a larger gridSize and therefore fewer elements per thread while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } #ifndef __DEVICE_EMULATION__ if (tid < 32) #endif { if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; } if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; } if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; } if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; } if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; } if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; __syncthreads(); } /* input vector of length N, output vector of length n*/ void reduce6(float * dataV, float * odataV, int Size, int n){ int N=Size*Size; cudaError_t err; int threads=Size/2; dim3 blockDim(threads); dim3 gridDim(n); size_t sharedMemSize=threads*sof; switch(threads) { // may need more cases here case 32: reduce6K<32> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 64: reduce6K<64> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 128: reduce6K<128> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 256: reduce6K<256> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 512: reduce6K<512> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; } err = cudaThreadSynchronize(); if (err != cudaSuccess) printf("reduce6 kernel execution failed\n"); }
.file "tmpxft_000bcb69_00000000-6_reduce6.cudafe1.cpp" .text #APP #NO_APP .type _ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j, @function _ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j: .LFB2084: .cfi_startproc subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L5 .L1: movq 120(%rsp), %rax subq %fs:40, %rax jne .L6 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L5: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8reduce6KILj32EEvPfS0_j(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L1 .L6: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j, .-_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j .section .text._Z8reduce6KILj32EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj32EEvPfS0_j,comdat .weak _Z8reduce6KILj32EEvPfS0_j .type _Z8reduce6KILj32EEvPfS0_j, @function _Z8reduce6KILj32EEvPfS0_j: .LFB2141: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2141: .size _Z8reduce6KILj32EEvPfS0_j, .-_Z8reduce6KILj32EEvPfS0_j .text .type _ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j, @function _ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j: .LFB2086: .cfi_startproc subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 
16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L13 .L9: movq 120(%rsp), %rax subq %fs:40, %rax jne .L14 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8reduce6KILj64EEvPfS0_j(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L9 .L14: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j, .-_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j .section .text._Z8reduce6KILj64EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj64EEvPfS0_j,comdat .weak _Z8reduce6KILj64EEvPfS0_j .type _Z8reduce6KILj64EEvPfS0_j, @function _Z8reduce6KILj64EEvPfS0_j: .LFB2142: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2142: .size _Z8reduce6KILj64EEvPfS0_j, .-_Z8reduce6KILj64EEvPfS0_j .text .type _ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j, @function _ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j: .LFB2088: .cfi_startproc subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl 
$1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L21 .L17: movq 120(%rsp), %rax subq %fs:40, %rax jne .L22 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8reduce6KILj128EEvPfS0_j(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L17 .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j .section .text._Z8reduce6KILj128EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj128EEvPfS0_j,comdat .weak _Z8reduce6KILj128EEvPfS0_j .type _Z8reduce6KILj128EEvPfS0_j, @function _Z8reduce6KILj128EEvPfS0_j: .LFB2143: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2143: .size _Z8reduce6KILj128EEvPfS0_j, .-_Z8reduce6KILj128EEvPfS0_j .text .type _ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j, @function _ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j: .LFB2090: .cfi_startproc subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L29 .L25: movq 120(%rsp), %rax subq %fs:40, %rax jne .L30 addq $136, %rsp .cfi_remember_state 
.cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8reduce6KILj256EEvPfS0_j(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L25 .L30: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j .section .text._Z8reduce6KILj256EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj256EEvPfS0_j,comdat .weak _Z8reduce6KILj256EEvPfS0_j .type _Z8reduce6KILj256EEvPfS0_j, @function _Z8reduce6KILj256EEvPfS0_j: .LFB2144: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2144: .size _Z8reduce6KILj256EEvPfS0_j, .-_Z8reduce6KILj256EEvPfS0_j .text .type _ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j, @function _ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j: .LFB2092: .cfi_startproc subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L37 .L33: movq 120(%rsp), %rax subq %fs:40, %rax jne .L38 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L37: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8reduce6KILj512EEvPfS0_j(%rip), 
%rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L33 .L38: call __stack_chk_fail@PLT .cfi_endproc .LFE2092: .size _ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j .section .text._Z8reduce6KILj512EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj512EEvPfS0_j,comdat .weak _Z8reduce6KILj512EEvPfS0_j .type _Z8reduce6KILj512EEvPfS0_j, @function _Z8reduce6KILj512EEvPfS0_j: .LFB2145: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2145: .size _Z8reduce6KILj512EEvPfS0_j, .-_Z8reduce6KILj512EEvPfS0_j .text .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "reduce6 kernel execution failed\n" .text .globl _Z7reduce6PfS_ii .type _Z7reduce6PfS_ii, @function _Z7reduce6PfS_ii: .LFB2059: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $32, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx movq %rsi, %rbp movl %edx, %r12d imull %edx, %r12d movl %edx, %eax shrl $31, %eax addl %edx, %eax sarl %eax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $1, 16(%rsp) movl %ecx, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) cmpl $128, %eax je .L44 cmpl $257, %edx jle .L53 cmpl $256, %eax je .L49 cmpl $512, %eax jne .L48 movl 16(%rsp), %ecx movl $0, %r9d movl $2048, %r8d movq 8(%rsp), %rdx movq 20(%rsp), %rdi movl 28(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L48 movl 
%r12d, %edx movq %rbp, %rsi movq %rbx, %rdi call _ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j jmp .L48 .L53: cmpl $32, %eax je .L46 cmpl $64, %eax jne .L48 movl 16(%rsp), %ecx movl $0, %r9d movl $256, %r8d movq 8(%rsp), %rdx movq 20(%rsp), %rdi movl 28(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L48 movl %r12d, %edx movq %rbp, %rsi movq %rbx, %rdi call _ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j jmp .L48 .L46: movl 16(%rsp), %ecx movl $0, %r9d movl $128, %r8d movq 8(%rsp), %rdx movq 20(%rsp), %rdi movl 28(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L54 .L48: call cudaThreadSynchronize@PLT testl %eax, %eax jne .L55 .L43: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L54: .cfi_restore_state movl %r12d, %edx movq %rbp, %rsi movq %rbx, %rdi call _ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j jmp .L48 .L44: movl 16(%rsp), %ecx movl $0, %r9d movl $512, %r8d movq 8(%rsp), %rdx movq 20(%rsp), %rdi movl 28(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L48 movl %r12d, %edx movq %rbp, %rsi movq %rbx, %rdi call _ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j jmp .L48 .L49: movl 16(%rsp), %ecx movl $0, %r9d movl $1024, %r8d movq 8(%rsp), %rdx movq 20(%rsp), %rdi movl 28(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L48 movl %r12d, %edx movq %rbp, %rsi movq %rbx, %rdi call _ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j jmp .L48 .L55: leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L43 .cfi_endproc .LFE2059: .size _Z7reduce6PfS_ii, .-_Z7reduce6PfS_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z8reduce6KILj512EEvPfS0_j" .LC2: .string "_Z8reduce6KILj256EEvPfS0_j" .LC3: .string "_Z8reduce6KILj128EEvPfS0_j" .LC4: .string "_Z8reduce6KILj64EEvPfS0_j" .LC5: .string "_Z8reduce6KILj32EEvPfS0_j" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2095: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj512EEvPfS0_j(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj256EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj128EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj64EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj32EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq 
_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include<stdio.h> //#include "cutil.h" #define FADD(a,b) __fadd_rn(a,b) #define sof sizeof(float) #ifdef __DEVICE_EMULATION__ #define EMUSYNC __syncthreads() #else #define EMUSYNC #endif /////////////////////////////////////////////////////////////////////// /** parallel reduction Harris 07 This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) AQ: this routine should be called with dim3 dimBlock(threads, 1, 1); dim3 dimGrid(numBlocks,1,1); but numBlocks = n/(2*threads) this routine meant to be run with numBlocks smaller than this by various factors of 2 int smemSize=threads*sizeof(float); case 512: reduce6<512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata,size); break: case 256: reduce6<256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata,size); */ /////////////////////////////////////////////////////////////////////// /// g_idata input vector on device length n /// g_odata output vector on device length numBlocks /// n length of vector /// reduction kernel template <unsigned int blockSize> // number of threads __global__ void reduce6K(float* g_idata, float* g_odata, unsigned int n) { extern volatile __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; /// we reduce multiple elements per thread. The number is determined by the /// number of active thread blocks (via gridSize). 
More blocks will result /// in a larger gridSize and therefore fewer elements per thread while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } #ifndef __DEVICE_EMULATION__ if (tid < 32) #endif { if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; } if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; } if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; EMUSYNC; } if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; EMUSYNC; } if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; EMUSYNC; } if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; EMUSYNC; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; __syncthreads(); } /* input vector of length N, output vector of length n*/ void reduce6(float * dataV, float * odataV, int Size, int n){ int N=Size*Size; cudaError_t err; int threads=Size/2; dim3 blockDim(threads); dim3 gridDim(n); size_t sharedMemSize=threads*sof; switch(threads) { // may need more cases here case 32: reduce6K<32> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 64: reduce6K<64> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 128: reduce6K<128> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 256: reduce6K<256> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; case 512: reduce6K<512> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break; } err = cudaThreadSynchronize(); if (err != cudaSuccess) printf("reduce6 kernel execution failed\n"); }
#include <hip/hip_runtime.h>
#include<stdio.h>
//#include "cutil.h"

// Round-to-nearest single-precision add intrinsic shorthand (unused here).
#define FADD(a,b) __fadd_rn(a,b)
#define sof sizeof(float)

// Under device emulation the warp-synchronous tail needs explicit barriers.
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif

///////////////////////////////////////////////////////////////////////
/**
  Parallel reduction, Harris 07 ("reduce6"), HIP port.

  Each thread adds multiple elements sequentially before the shared-memory
  tree reduction, keeping work complexity O(n) and step complexity O(log n)
  (Brent's Theorem optimization).

  Call with
      dim3 dimBlock(threads, 1, 1);
      dim3 dimGrid(numBlocks, 1, 1);   // numBlocks = n/(2*threads) or less
      size_t smemSize = threads*sizeof(float);
      reduce6K<threads><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
*/
///////////////////////////////////////////////////////////////////////

/// g_idata  input vector on device, length n
/// g_odata  output vector on device, length numBlocks (one partial sum per block)
/// n        length of the input vector
/// blockSize == number of threads per block (compile-time constant)
template <unsigned int blockSize>
__global__ void reduce6K(float* g_idata, float* g_odata, unsigned int n)
{
    extern volatile __shared__ float sdata[];   // one float per thread

    unsigned int tid      = threadIdx.x;
    unsigned int i        = blockIdx.x*(blockSize*2) + tid;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;

    /// Each thread reduces multiple elements; the count is set by the number
    /// of active blocks (via gridSize).
    // NOTE(review): g_idata[i+blockSize] is read whenever i < n, so n is
    // assumed to be a multiple of 2*blockSize*gridDim.x -- confirm callers.
    while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; }
    __syncthreads();

    // Tree reduction in shared memory; untaken branches compile away.
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
    if (tid < 32)   // last steps run inside one warp; sdata is volatile
#endif
    {
        if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
        if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
        if (blockSize >= 16) { sdata[tid] += sdata[tid +  8]; EMUSYNC; }
        if (blockSize >=  8) { sdata[tid] += sdata[tid +  4]; EMUSYNC; }
        if (blockSize >=  4) { sdata[tid] += sdata[tid +  2]; EMUSYNC; }
        if (blockSize >=  2) { sdata[tid] += sdata[tid +  1]; EMUSYNC; }
    }

    // Write this block's partial sum to global memory.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
    __syncthreads();
}

/* Reduce an input vector of length Size*Size into n per-block partial sums.
   dataV and odataV are device pointers; odataV must hold n floats. */
void reduce6(float * dataV, float * odataV, int Size, int n){
    int N = Size*Size;
    hipError_t err;
    int threads = Size/2;                 // threads per block
    dim3 blockDim(threads);
    dim3 gridDim(n);
    size_t sharedMemSize = threads*sof;   // one float of smem per thread

    // One instantiation per supported block size (template arg must be constant).
    switch(threads) { // may need more cases here
    case  32: reduce6K< 32> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case  64: reduce6K< 64> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 128: reduce6K<128> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 256: reduce6K<256> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 512: reduce6K<512> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    }
    err = hipDeviceSynchronize();
    if (err != hipSuccess)
        printf("reduce6 kernel execution failed\n");
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include<stdio.h>
//#include "cutil.h"

// Round-to-nearest single-precision add intrinsic shorthand (unused here).
#define FADD(a,b) __fadd_rn(a,b)
#define sof sizeof(float)

// Under device emulation the warp-synchronous tail needs explicit barriers.
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif

///////////////////////////////////////////////////////////////////////
/**
  Parallel reduction, Harris 07 ("reduce6"), HIP port.

  Each thread adds multiple elements sequentially before the shared-memory
  tree reduction, keeping work complexity O(n) and step complexity O(log n)
  (Brent's Theorem optimization).

  Call with
      dim3 dimBlock(threads, 1, 1);
      dim3 dimGrid(numBlocks, 1, 1);   // numBlocks = n/(2*threads) or less
      size_t smemSize = threads*sizeof(float);
      reduce6K<threads><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
*/
///////////////////////////////////////////////////////////////////////

/// g_idata  input vector on device, length n
/// g_odata  output vector on device, length numBlocks (one partial sum per block)
/// n        length of the input vector
/// blockSize == number of threads per block (compile-time constant)
template <unsigned int blockSize>
__global__ void reduce6K(float* g_idata, float* g_odata, unsigned int n)
{
    extern volatile __shared__ float sdata[];   // one float per thread

    unsigned int tid      = threadIdx.x;
    unsigned int i        = blockIdx.x*(blockSize*2) + tid;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;

    /// Each thread reduces multiple elements; the count is set by the number
    /// of active blocks (via gridSize).
    // NOTE(review): g_idata[i+blockSize] is read whenever i < n, so n is
    // assumed to be a multiple of 2*blockSize*gridDim.x -- confirm callers.
    while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; }
    __syncthreads();

    // Tree reduction in shared memory; untaken branches compile away.
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
    if (tid < 32)   // last steps run inside one warp; sdata is volatile
#endif
    {
        if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; EMUSYNC; }
        if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; EMUSYNC; }
        if (blockSize >= 16) { sdata[tid] += sdata[tid +  8]; EMUSYNC; }
        if (blockSize >=  8) { sdata[tid] += sdata[tid +  4]; EMUSYNC; }
        if (blockSize >=  4) { sdata[tid] += sdata[tid +  2]; EMUSYNC; }
        if (blockSize >=  2) { sdata[tid] += sdata[tid +  1]; EMUSYNC; }
    }

    // Write this block's partial sum to global memory.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
    __syncthreads();
}

/* Reduce an input vector of length Size*Size into n per-block partial sums.
   dataV and odataV are device pointers; odataV must hold n floats. */
void reduce6(float * dataV, float * odataV, int Size, int n){
    int N = Size*Size;
    hipError_t err;
    int threads = Size/2;                 // threads per block
    dim3 blockDim(threads);
    dim3 gridDim(n);
    size_t sharedMemSize = threads*sof;   // one float of smem per thread

    // One instantiation per supported block size (template arg must be constant).
    switch(threads) { // may need more cases here
    case  32: reduce6K< 32> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case  64: reduce6K< 64> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 128: reduce6K<128> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 256: reduce6K<256> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    case 512: reduce6K<512> <<< gridDim, blockDim, sharedMemSize>>>(dataV, odataV, N); break;
    }
    err = hipDeviceSynchronize();
    if (err != hipSuccess)
        printf("reduce6 kernel execution failed\n");
}
# AMD clang -S host output for reduce6.hip (x86-64, SysV).
# Restored to one GAS statement per line; tokens unchanged from the dump.
	.text
	.file	"reduce6.hip"
	.globl	_Z7reduce6PfS_ii                # -- Begin function _Z7reduce6PfS_ii
	.p2align	4, 0x90
	.type	_Z7reduce6PfS_ii,@function
_Z7reduce6PfS_ii:                       # @_Z7reduce6PfS_ii
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	subq	$112, %rsp
	.cfi_def_cfa_offset 144
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %rbp, -16
	movq	%rsi, %rbx
	movq	%rdi, %r14
	movl	%edx, %eax
	shrl	$31, %eax
	addl	%edx, %eax
	movl	%edx, %ebp
	imull	%edx, %ebp
	sarl	%eax
	movabsq	$4294967296, %rsi               # imm = 0x100000000
	leaq	(%rax,%rsi), %rdx
	movl	%ecx, %edi
	orq	%rsi, %rdi
	movslq	%eax, %rcx
	leaq	(,%rcx,4), %r8
	cmpl	$127, %ecx
	jle	.LBB0_1
# %bb.5:
	cmpl	$128, %eax
	je	.LBB0_12
# %bb.6:
	cmpl	$256, %eax                      # imm = 0x100
	je	.LBB0_14
# %bb.7:
	cmpl	$512, %eax                      # imm = 0x200
	jne	.LBB0_17
# %bb.8:
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB0_17
# %bb.9:
	movq	%r14, 72(%rsp)
	movq	%rbx, 64(%rsp)
	movl	%ebp, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj512EEvPfS0_j, %edi
	jmp	.LBB0_16
.LBB0_1:
	cmpl	$32, %eax
	je	.LBB0_10
# %bb.2:
	cmpl	$64, %eax
	jne	.LBB0_17
# %bb.3:
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB0_17
# %bb.4:
	movq	%r14, 72(%rsp)
	movq	%rbx, 64(%rsp)
	movl	%ebp, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj64EEvPfS0_j, %edi
	jmp	.LBB0_16
.LBB0_12:
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB0_17
# %bb.13:
	movq	%r14, 72(%rsp)
	movq	%rbx, 64(%rsp)
	movl	%ebp, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj128EEvPfS0_j, %edi
	jmp	.LBB0_16
.LBB0_14:
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB0_17
# %bb.15:
	movq	%r14, 72(%rsp)
	movq	%rbx, 64(%rsp)
	movl	%ebp, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj256EEvPfS0_j, %edi
	jmp	.LBB0_16
.LBB0_10:
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB0_17
# %bb.11:
	movq	%r14, 72(%rsp)
	movq	%rbx, 64(%rsp)
	movl	%ebp, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj32EEvPfS0_j, %edi
.LBB0_16:
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB0_17:
	callq	hipDeviceSynchronize
	testl	%eax, %eax
	je	.LBB0_19
# %bb.18:
	movl	$.Lstr, %edi
	callq	puts@PLT
.LBB0_19:
	addq	$112, %rsp
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end0:
	.size	_Z7reduce6PfS_ii, .Lfunc_end0-_Z7reduce6PfS_ii
	.cfi_endproc
                                        # -- End function
	.section	.text._Z23__device_stub__reduce6KILj32EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj32EEvPfS0_j,comdat
	.weak	_Z23__device_stub__reduce6KILj32EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj32EEvPfS0_j
	.p2align	4, 0x90
	.type	_Z23__device_stub__reduce6KILj32EEvPfS0_j,@function
_Z23__device_stub__reduce6KILj32EEvPfS0_j: # @_Z23__device_stub__reduce6KILj32EEvPfS0_j
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj32EEvPfS0_j, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end1:
	.size	_Z23__device_stub__reduce6KILj32EEvPfS0_j, .Lfunc_end1-_Z23__device_stub__reduce6KILj32EEvPfS0_j
	.cfi_endproc
                                        # -- End function
	.section	.text._Z23__device_stub__reduce6KILj64EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj64EEvPfS0_j,comdat
	.weak	_Z23__device_stub__reduce6KILj64EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj64EEvPfS0_j
	.p2align	4, 0x90
	.type	_Z23__device_stub__reduce6KILj64EEvPfS0_j,@function
_Z23__device_stub__reduce6KILj64EEvPfS0_j: # @_Z23__device_stub__reduce6KILj64EEvPfS0_j
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj64EEvPfS0_j, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end2:
	.size	_Z23__device_stub__reduce6KILj64EEvPfS0_j, .Lfunc_end2-_Z23__device_stub__reduce6KILj64EEvPfS0_j
	.cfi_endproc
                                        # -- End function
	.section	.text._Z23__device_stub__reduce6KILj128EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj128EEvPfS0_j,comdat
	.weak	_Z23__device_stub__reduce6KILj128EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj128EEvPfS0_j
	.p2align	4, 0x90
	.type	_Z23__device_stub__reduce6KILj128EEvPfS0_j,@function
_Z23__device_stub__reduce6KILj128EEvPfS0_j: # @_Z23__device_stub__reduce6KILj128EEvPfS0_j
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj128EEvPfS0_j, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end3:
	.size	_Z23__device_stub__reduce6KILj128EEvPfS0_j, .Lfunc_end3-_Z23__device_stub__reduce6KILj128EEvPfS0_j
	.cfi_endproc
                                        # -- End function
	.section	.text._Z23__device_stub__reduce6KILj256EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj256EEvPfS0_j,comdat
	.weak	_Z23__device_stub__reduce6KILj256EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj256EEvPfS0_j
	.p2align	4, 0x90
	.type	_Z23__device_stub__reduce6KILj256EEvPfS0_j,@function
_Z23__device_stub__reduce6KILj256EEvPfS0_j: # @_Z23__device_stub__reduce6KILj256EEvPfS0_j
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj256EEvPfS0_j, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end4:
	.size	_Z23__device_stub__reduce6KILj256EEvPfS0_j, .Lfunc_end4-_Z23__device_stub__reduce6KILj256EEvPfS0_j
	.cfi_endproc
                                        # -- End function
	.section	.text._Z23__device_stub__reduce6KILj512EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj512EEvPfS0_j,comdat
	.weak	_Z23__device_stub__reduce6KILj512EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj512EEvPfS0_j
	.p2align	4, 0x90
	.type	_Z23__device_stub__reduce6KILj512EEvPfS0_j,@function
_Z23__device_stub__reduce6KILj512EEvPfS0_j: # @_Z23__device_stub__reduce6KILj512EEvPfS0_j
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z8reduce6KILj512EEvPfS0_j, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end5:
	.size	_Z23__device_stub__reduce6KILj512EEvPfS0_j, .Lfunc_end5-_Z23__device_stub__reduce6KILj512EEvPfS0_j
	.cfi_endproc
                                        # -- End function
	.text
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB6_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB6_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8reduce6KILj32EEvPfS0_j, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8reduce6KILj64EEvPfS0_j, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8reduce6KILj128EEvPfS0_j, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8reduce6KILj256EEvPfS0_j, %esi
	movl	$.L__unnamed_4, %edx
	movl	$.L__unnamed_4, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8reduce6KILj512EEvPfS0_j, %esi
	movl	$.L__unnamed_5, %edx
	movl	$.L__unnamed_5, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end6:
	.size	__hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB7_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size	__hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
	.type	_Z8reduce6KILj32EEvPfS0_j,@object # @_Z8reduce6KILj32EEvPfS0_j
	.section	.rodata._Z8reduce6KILj32EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj32EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj32EEvPfS0_j
	.p2align	3, 0x0
_Z8reduce6KILj32EEvPfS0_j:
	.quad	_Z23__device_stub__reduce6KILj32EEvPfS0_j
	.size	_Z8reduce6KILj32EEvPfS0_j, 8
	.type	_Z8reduce6KILj64EEvPfS0_j,@object # @_Z8reduce6KILj64EEvPfS0_j
	.section	.rodata._Z8reduce6KILj64EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj64EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj64EEvPfS0_j
	.p2align	3, 0x0
_Z8reduce6KILj64EEvPfS0_j:
	.quad	_Z23__device_stub__reduce6KILj64EEvPfS0_j
	.size	_Z8reduce6KILj64EEvPfS0_j, 8
	.type	_Z8reduce6KILj128EEvPfS0_j,@object # @_Z8reduce6KILj128EEvPfS0_j
	.section	.rodata._Z8reduce6KILj128EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj128EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj128EEvPfS0_j
	.p2align	3, 0x0
_Z8reduce6KILj128EEvPfS0_j:
	.quad	_Z23__device_stub__reduce6KILj128EEvPfS0_j
	.size	_Z8reduce6KILj128EEvPfS0_j, 8
	.type	_Z8reduce6KILj256EEvPfS0_j,@object # @_Z8reduce6KILj256EEvPfS0_j
	.section	.rodata._Z8reduce6KILj256EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj256EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj256EEvPfS0_j
	.p2align	3, 0x0
_Z8reduce6KILj256EEvPfS0_j:
	.quad	_Z23__device_stub__reduce6KILj256EEvPfS0_j
	.size	_Z8reduce6KILj256EEvPfS0_j, 8
	.type	_Z8reduce6KILj512EEvPfS0_j,@object # @_Z8reduce6KILj512EEvPfS0_j
	.section	.rodata._Z8reduce6KILj512EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj512EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj512EEvPfS0_j
	.p2align	3, 0x0
_Z8reduce6KILj512EEvPfS0_j:
	.quad	_Z23__device_stub__reduce6KILj512EEvPfS0_j
	.size	_Z8reduce6KILj512EEvPfS0_j, 8
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"_Z8reduce6KILj32EEvPfS0_j"
	.size	.L__unnamed_1, 26
	.type	.L__unnamed_2,@object           # @1
.L__unnamed_2:
	.asciz	"_Z8reduce6KILj64EEvPfS0_j"
	.size	.L__unnamed_2, 26
	.type	.L__unnamed_3,@object           # @2
.L__unnamed_3:
	.asciz	"_Z8reduce6KILj128EEvPfS0_j"
	.size	.L__unnamed_3, 27
	.type	.L__unnamed_4,@object           # @3
.L__unnamed_4:
	.asciz	"_Z8reduce6KILj256EEvPfS0_j"
	.size	.L__unnamed_4, 27
	.type	.L__unnamed_5,@object           # @4
.L__unnamed_5:
	.asciz	"_Z8reduce6KILj512EEvPfS0_j"
	.size	.L__unnamed_5, 27
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.type	.Lstr,@object                   # @str
	.section	.rodata.str1.1,"aMS",@progbits,1
.Lstr:
	.asciz	"reduce6 kernel execution failed"
	.size	.Lstr, 32
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym _Z23__device_stub__reduce6KILj32EEvPfS0_j
	.addrsig_sym _Z23__device_stub__reduce6KILj64EEvPfS0_j
	.addrsig_sym _Z23__device_stub__reduce6KILj128EEvPfS0_j
	.addrsig_sym _Z23__device_stub__reduce6KILj256EEvPfS0_j
	.addrsig_sym _Z23__device_stub__reduce6KILj512EEvPfS0_j
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z8reduce6KILj32EEvPfS0_j
	.addrsig_sym _Z8reduce6KILj64EEvPfS0_j
	.addrsig_sym _Z8reduce6KILj128EEvPfS0_j
	.addrsig_sym _Z8reduce6KILj256EEvPfS0_j
	.addrsig_sym _Z8reduce6KILj512EEvPfS0_j
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
# gcc host output for reduce6.cu (x86-64, SysV), newlines restored.
# The final constructor _ZL24__sti____cudaRegisterAllv is truncated in this
# dump; its leading ".type" token is preserved verbatim at the end so the
# join with the following (untouched) content is unchanged.
	.file	"tmpxft_000bcb69_00000000-6_reduce6.cudafe1.cpp"
	.text
#APP
#NO_APP
	.type	_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j, @function
_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j:
.LFB2084:
	.cfi_startproc
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L5
.L1:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L6
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L5:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z8reduce6KILj32EEvPfS0_j(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L1
.L6:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2084:
	.size	_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j, .-_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j
	.section	.text._Z8reduce6KILj32EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj32EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj32EEvPfS0_j
	.type	_Z8reduce6KILj32EEvPfS0_j, @function
_Z8reduce6KILj32EEvPfS0_j:
.LFB2141:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2141:
	.size	_Z8reduce6KILj32EEvPfS0_j, .-_Z8reduce6KILj32EEvPfS0_j
	.text
	.type	_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j, @function
_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j:
.LFB2086:
	.cfi_startproc
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L13
.L9:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L14
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L13:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z8reduce6KILj64EEvPfS0_j(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L9
.L14:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2086:
	.size	_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j, .-_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j
	.section	.text._Z8reduce6KILj64EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj64EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj64EEvPfS0_j
	.type	_Z8reduce6KILj64EEvPfS0_j, @function
_Z8reduce6KILj64EEvPfS0_j:
.LFB2142:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2142:
	.size	_Z8reduce6KILj64EEvPfS0_j, .-_Z8reduce6KILj64EEvPfS0_j
	.text
	.type	_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j, @function
_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j:
.LFB2088:
	.cfi_startproc
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L21
.L17:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L22
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L21:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z8reduce6KILj128EEvPfS0_j(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L17
.L22:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2088:
	.size	_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j
	.section	.text._Z8reduce6KILj128EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj128EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj128EEvPfS0_j
	.type	_Z8reduce6KILj128EEvPfS0_j, @function
_Z8reduce6KILj128EEvPfS0_j:
.LFB2143:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2143:
	.size	_Z8reduce6KILj128EEvPfS0_j, .-_Z8reduce6KILj128EEvPfS0_j
	.text
	.type	_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j, @function
_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j:
.LFB2090:
	.cfi_startproc
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L29
.L25:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L30
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L29:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z8reduce6KILj256EEvPfS0_j(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L25
.L30:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2090:
	.size	_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j
	.section	.text._Z8reduce6KILj256EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj256EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj256EEvPfS0_j
	.type	_Z8reduce6KILj256EEvPfS0_j, @function
_Z8reduce6KILj256EEvPfS0_j:
.LFB2144:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2144:
	.size	_Z8reduce6KILj256EEvPfS0_j, .-_Z8reduce6KILj256EEvPfS0_j
	.text
	.type	_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j, @function
_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j:
.LFB2092:
	.cfi_startproc
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L37
.L33:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L38
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L37:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z8reduce6KILj512EEvPfS0_j(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L33
.L38:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2092:
	.size	_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j, .-_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j
	.section	.text._Z8reduce6KILj512EEvPfS0_j,"axG",@progbits,_Z8reduce6KILj512EEvPfS0_j,comdat
	.weak	_Z8reduce6KILj512EEvPfS0_j
	.type	_Z8reduce6KILj512EEvPfS0_j, @function
_Z8reduce6KILj512EEvPfS0_j:
.LFB2145:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2145:
	.size	_Z8reduce6KILj512EEvPfS0_j, .-_Z8reduce6KILj512EEvPfS0_j
	.text
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2062:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	.section	.rodata.str1.8,"aMS",@progbits,1
	.align 8
.LC0:
	.string	"reduce6 kernel execution failed\n"
	.text
	.globl	_Z7reduce6PfS_ii
	.type	_Z7reduce6PfS_ii, @function
_Z7reduce6PfS_ii:
.LFB2059:
	.cfi_startproc
	endbr64
	pushq	%r12
	.cfi_def_cfa_offset 16
	.cfi_offset 12, -16
	pushq	%rbp
	.cfi_def_cfa_offset 24
	.cfi_offset 6, -24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset 3, -32
	subq	$32, %rsp
	.cfi_def_cfa_offset 64
	movq	%rdi, %rbx
	movq	%rsi, %rbp
	movl	%edx, %r12d
	imull	%edx, %r12d
	movl	%edx, %eax
	shrl	$31, %eax
	addl	%edx, %eax
	sarl	%eax
	movl	%eax, 8(%rsp)
	movl	$1, 12(%rsp)
	movl	$1, 16(%rsp)
	movl	%ecx, 20(%rsp)
	movl	$1, 24(%rsp)
	movl	$1, 28(%rsp)
	cmpl	$128, %eax
	je	.L44
	cmpl	$257, %edx
	jle	.L53
	cmpl	$256, %eax
	je	.L49
	cmpl	$512, %eax
	jne	.L48
	movl	16(%rsp), %ecx
	movl	$0, %r9d
	movl	$2048, %r8d
	movq	8(%rsp), %rdx
	movq	20(%rsp), %rdi
	movl	28(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	jne	.L48
	movl	%r12d, %edx
	movq	%rbp, %rsi
	movq	%rbx, %rdi
	call	_ZL40__device_stub__Z8reduce6KILj512EEvPfS0_jPfS_j
	jmp	.L48
.L53:
	cmpl	$32, %eax
	je	.L46
	cmpl	$64, %eax
	jne	.L48
	movl	16(%rsp), %ecx
	movl	$0, %r9d
	movl	$256, %r8d
	movq	8(%rsp), %rdx
	movq	20(%rsp), %rdi
	movl	28(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	jne	.L48
	movl	%r12d, %edx
	movq	%rbp, %rsi
	movq	%rbx, %rdi
	call	_ZL39__device_stub__Z8reduce6KILj64EEvPfS0_jPfS_j
	jmp	.L48
.L46:
	movl	16(%rsp), %ecx
	movl	$0, %r9d
	movl	$128, %r8d
	movq	8(%rsp), %rdx
	movq	20(%rsp), %rdi
	movl	28(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L54
.L48:
	call	cudaThreadSynchronize@PLT
	testl	%eax, %eax
	jne	.L55
.L43:
	addq	$32, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%rbp
	.cfi_def_cfa_offset 16
	popq	%r12
	.cfi_def_cfa_offset 8
	ret
.L54:
	.cfi_restore_state
	movl	%r12d, %edx
	movq	%rbp, %rsi
	movq	%rbx, %rdi
	call	_ZL39__device_stub__Z8reduce6KILj32EEvPfS0_jPfS_j
	jmp	.L48
.L44:
	movl	16(%rsp), %ecx
	movl	$0, %r9d
	movl	$512, %r8d
	movq	8(%rsp), %rdx
	movq	20(%rsp), %rdi
	movl	28(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	jne	.L48
	movl	%r12d, %edx
	movq	%rbp, %rsi
	movq	%rbx, %rdi
	call	_ZL40__device_stub__Z8reduce6KILj128EEvPfS0_jPfS_j
	jmp	.L48
.L49:
	movl	16(%rsp), %ecx
	movl	$0, %r9d
	movl	$1024, %r8d
	movq	8(%rsp), %rdx
	movq	20(%rsp), %rdi
	movl	28(%rsp), %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	jne	.L48
	movl	%r12d, %edx
	movq	%rbp, %rsi
	movq	%rbx, %rdi
	call	_ZL40__device_stub__Z8reduce6KILj256EEvPfS0_jPfS_j
	jmp	.L48
.L55:
	leaq	.LC0(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	jmp	.L43
	.cfi_endproc
.LFE2059:
	.size	_Z7reduce6PfS_ii, .-_Z7reduce6PfS_ii
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC1:
	.string	"_Z8reduce6KILj512EEvPfS0_j"
.LC2:
	.string	"_Z8reduce6KILj256EEvPfS0_j"
.LC3:
	.string	"_Z8reduce6KILj128EEvPfS0_j"
.LC4:
	.string	"_Z8reduce6KILj64EEvPfS0_j"
.LC5:
	.string	"_Z8reduce6KILj32EEvPfS0_j"
	.text
	.type
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2095: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj512EEvPfS0_j(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj256EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj128EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj64EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z8reduce6KILj32EEvPfS0_j(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq 
_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "reduce6.hip" .globl _Z7reduce6PfS_ii # -- Begin function _Z7reduce6PfS_ii .p2align 4, 0x90 .type _Z7reduce6PfS_ii,@function _Z7reduce6PfS_ii: # @_Z7reduce6PfS_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 movq %rsi, %rbx movq %rdi, %r14 movl %edx, %eax shrl $31, %eax addl %edx, %eax movl %edx, %ebp imull %edx, %ebp sarl %eax movabsq $4294967296, %rsi # imm = 0x100000000 leaq (%rax,%rsi), %rdx movl %ecx, %edi orq %rsi, %rdi movslq %eax, %rcx leaq (,%rcx,4), %r8 cmpl $127, %ecx jle .LBB0_1 # %bb.5: cmpl $128, %eax je .LBB0_12 # %bb.6: cmpl $256, %eax # imm = 0x100 je .LBB0_14 # %bb.7: cmpl $512, %eax # imm = 0x200 jne .LBB0_17 # %bb.8: movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_17 # %bb.9: movq %r14, 72(%rsp) movq %rbx, 64(%rsp) movl %ebp, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj512EEvPfS0_j, %edi jmp .LBB0_16 .LBB0_1: cmpl $32, %eax je .LBB0_10 # %bb.2: cmpl $64, %eax jne .LBB0_17 # %bb.3: movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_17 # %bb.4: movq %r14, 72(%rsp) movq %rbx, 64(%rsp) movl %ebp, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl 
$_Z8reduce6KILj64EEvPfS0_j, %edi jmp .LBB0_16 .LBB0_12: movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_17 # %bb.13: movq %r14, 72(%rsp) movq %rbx, 64(%rsp) movl %ebp, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj128EEvPfS0_j, %edi jmp .LBB0_16 .LBB0_14: movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_17 # %bb.15: movq %r14, 72(%rsp) movq %rbx, 64(%rsp) movl %ebp, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj256EEvPfS0_j, %edi jmp .LBB0_16 .LBB0_10: movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB0_17 # %bb.11: movq %r14, 72(%rsp) movq %rbx, 64(%rsp) movl %ebp, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj32EEvPfS0_j, %edi .LBB0_16: pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB0_17: callq hipDeviceSynchronize testl %eax, %eax je .LBB0_19 # %bb.18: movl $.Lstr, %edi callq 
puts@PLT .LBB0_19: addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z7reduce6PfS_ii, .Lfunc_end0-_Z7reduce6PfS_ii .cfi_endproc # -- End function .section .text._Z23__device_stub__reduce6KILj32EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj32EEvPfS0_j,comdat .weak _Z23__device_stub__reduce6KILj32EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj32EEvPfS0_j .p2align 4, 0x90 .type _Z23__device_stub__reduce6KILj32EEvPfS0_j,@function _Z23__device_stub__reduce6KILj32EEvPfS0_j: # @_Z23__device_stub__reduce6KILj32EEvPfS0_j .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj32EEvPfS0_j, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end1: .size _Z23__device_stub__reduce6KILj32EEvPfS0_j, .Lfunc_end1-_Z23__device_stub__reduce6KILj32EEvPfS0_j .cfi_endproc # -- End function .section .text._Z23__device_stub__reduce6KILj64EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj64EEvPfS0_j,comdat .weak _Z23__device_stub__reduce6KILj64EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj64EEvPfS0_j .p2align 4, 0x90 .type _Z23__device_stub__reduce6KILj64EEvPfS0_j,@function _Z23__device_stub__reduce6KILj64EEvPfS0_j: # @_Z23__device_stub__reduce6KILj64EEvPfS0_j .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 
80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj64EEvPfS0_j, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z23__device_stub__reduce6KILj64EEvPfS0_j, .Lfunc_end2-_Z23__device_stub__reduce6KILj64EEvPfS0_j .cfi_endproc # -- End function .section .text._Z23__device_stub__reduce6KILj128EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj128EEvPfS0_j,comdat .weak _Z23__device_stub__reduce6KILj128EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj128EEvPfS0_j .p2align 4, 0x90 .type _Z23__device_stub__reduce6KILj128EEvPfS0_j,@function _Z23__device_stub__reduce6KILj128EEvPfS0_j: # @_Z23__device_stub__reduce6KILj128EEvPfS0_j .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj128EEvPfS0_j, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z23__device_stub__reduce6KILj128EEvPfS0_j, .Lfunc_end3-_Z23__device_stub__reduce6KILj128EEvPfS0_j .cfi_endproc # -- End function .section .text._Z23__device_stub__reduce6KILj256EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj256EEvPfS0_j,comdat .weak 
_Z23__device_stub__reduce6KILj256EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj256EEvPfS0_j .p2align 4, 0x90 .type _Z23__device_stub__reduce6KILj256EEvPfS0_j,@function _Z23__device_stub__reduce6KILj256EEvPfS0_j: # @_Z23__device_stub__reduce6KILj256EEvPfS0_j .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj256EEvPfS0_j, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end4: .size _Z23__device_stub__reduce6KILj256EEvPfS0_j, .Lfunc_end4-_Z23__device_stub__reduce6KILj256EEvPfS0_j .cfi_endproc # -- End function .section .text._Z23__device_stub__reduce6KILj512EEvPfS0_j,"axG",@progbits,_Z23__device_stub__reduce6KILj512EEvPfS0_j,comdat .weak _Z23__device_stub__reduce6KILj512EEvPfS0_j # -- Begin function _Z23__device_stub__reduce6KILj512EEvPfS0_j .p2align 4, 0x90 .type _Z23__device_stub__reduce6KILj512EEvPfS0_j,@function _Z23__device_stub__reduce6KILj512EEvPfS0_j: # @_Z23__device_stub__reduce6KILj512EEvPfS0_j .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8reduce6KILj512EEvPfS0_j, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 
8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end5: .size _Z23__device_stub__reduce6KILj512EEvPfS0_j, .Lfunc_end5-_Z23__device_stub__reduce6KILj512EEvPfS0_j .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8reduce6KILj32EEvPfS0_j, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8reduce6KILj64EEvPfS0_j, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8reduce6KILj128EEvPfS0_j, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8reduce6KILj256EEvPfS0_j, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8reduce6KILj512EEvPfS0_j, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size 
__hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z8reduce6KILj32EEvPfS0_j,@object # @_Z8reduce6KILj32EEvPfS0_j .section .rodata._Z8reduce6KILj32EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj32EEvPfS0_j,comdat .weak _Z8reduce6KILj32EEvPfS0_j .p2align 3, 0x0 _Z8reduce6KILj32EEvPfS0_j: .quad _Z23__device_stub__reduce6KILj32EEvPfS0_j .size _Z8reduce6KILj32EEvPfS0_j, 8 .type _Z8reduce6KILj64EEvPfS0_j,@object # @_Z8reduce6KILj64EEvPfS0_j .section .rodata._Z8reduce6KILj64EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj64EEvPfS0_j,comdat .weak _Z8reduce6KILj64EEvPfS0_j .p2align 3, 0x0 _Z8reduce6KILj64EEvPfS0_j: .quad _Z23__device_stub__reduce6KILj64EEvPfS0_j .size _Z8reduce6KILj64EEvPfS0_j, 8 .type _Z8reduce6KILj128EEvPfS0_j,@object # @_Z8reduce6KILj128EEvPfS0_j .section .rodata._Z8reduce6KILj128EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj128EEvPfS0_j,comdat .weak _Z8reduce6KILj128EEvPfS0_j .p2align 3, 0x0 _Z8reduce6KILj128EEvPfS0_j: .quad _Z23__device_stub__reduce6KILj128EEvPfS0_j .size _Z8reduce6KILj128EEvPfS0_j, 8 .type _Z8reduce6KILj256EEvPfS0_j,@object # @_Z8reduce6KILj256EEvPfS0_j .section .rodata._Z8reduce6KILj256EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj256EEvPfS0_j,comdat .weak _Z8reduce6KILj256EEvPfS0_j .p2align 3, 0x0 _Z8reduce6KILj256EEvPfS0_j: .quad _Z23__device_stub__reduce6KILj256EEvPfS0_j .size _Z8reduce6KILj256EEvPfS0_j, 8 .type _Z8reduce6KILj512EEvPfS0_j,@object # @_Z8reduce6KILj512EEvPfS0_j .section 
.rodata._Z8reduce6KILj512EEvPfS0_j,"aG",@progbits,_Z8reduce6KILj512EEvPfS0_j,comdat .weak _Z8reduce6KILj512EEvPfS0_j .p2align 3, 0x0 _Z8reduce6KILj512EEvPfS0_j: .quad _Z23__device_stub__reduce6KILj512EEvPfS0_j .size _Z8reduce6KILj512EEvPfS0_j, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8reduce6KILj32EEvPfS0_j" .size .L__unnamed_1, 26 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z8reduce6KILj64EEvPfS0_j" .size .L__unnamed_2, 26 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8reduce6KILj128EEvPfS0_j" .size .L__unnamed_3, 27 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z8reduce6KILj256EEvPfS0_j" .size .L__unnamed_4, 27 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "_Z8reduce6KILj512EEvPfS0_j" .size .L__unnamed_5, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "reduce6 kernel execution failed" .size .Lstr, 32 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__reduce6KILj32EEvPfS0_j .addrsig_sym _Z23__device_stub__reduce6KILj64EEvPfS0_j .addrsig_sym _Z23__device_stub__reduce6KILj128EEvPfS0_j .addrsig_sym _Z23__device_stub__reduce6KILj256EEvPfS0_j .addrsig_sym 
_Z23__device_stub__reduce6KILj512EEvPfS0_j .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8reduce6KILj32EEvPfS0_j .addrsig_sym _Z8reduce6KILj64EEvPfS0_j .addrsig_sym _Z8reduce6KILj128EEvPfS0_j .addrsig_sym _Z8reduce6KILj256EEvPfS0_j .addrsig_sym _Z8reduce6KILj512EEvPfS0_j .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> __global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3) int batch_idx = blockIdx.x; xyz += batch_idx*n*3; init_xyz += batch_idx*num*3; result += batch_idx*n; float x_c,y_c,z_c,x,y,z; int inte = 0; extern __shared__ float s[]; float *temp_dist = s; int *ct_idx_old = (int*)&temp_dist[n*num]; float *ct_xyz =(float*)&ct_idx_old[n]; float *temp_ct = (float*)&ct_xyz[num*3]; int *ct_cnt = (int*)&temp_ct[num*3]; int tid = threadIdx.x; //test: num=2 n=1024 float min_dist = 1e8; if(tid<num) { ct_cnt[tid]=0; temp_ct[tid*3]=0; temp_ct[tid*3+1]=0; temp_ct[tid*3+2]=0; } if(tid<n) { ct_idx_old[tid]=0; } __syncthreads(); while(inte<3) { if(tid<num) { temp_ct[tid*3] += xyz[tid*3]; temp_ct[tid*3+1] += xyz[tid*3+1]; temp_ct[tid*3+2] += xyz[tid*3+2]; } for (int j=threadIdx.x;j<n;j+=blockDim.x) //一个点一个点处理 { x = xyz[j*3]; y = xyz[j*3+1]; z = xyz[j*3+2]; for(int i=0;i<num;i+=1) //获得第j个点与第i个中心点的距离 { if(inte == 0) { x_c = init_xyz[i*3]; y_c = init_xyz[i*3+1]; z_c = init_xyz[i*3+2]; } else { x_c = ct_xyz[i*3]; y_c = ct_xyz[i*3+1]; z_c = ct_xyz[i*3+2]; } temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c); if(temp_dist[j*num+i]<min_dist) { result[j] = i; min_dist = temp_dist[j*num+i]; } } // min_dist=1e8; } if(tid==0) { for(int ct=0;ct<n;ct++) { ct_cnt[result[ct]]++; temp_ct[result[ct]*3] += xyz[ct*3]; temp_ct[result[ct]*3+1] += xyz[ct*3+1]; temp_ct[result[ct]*3+2] += xyz[ct*3+2]; } } __syncthreads(); if(tid<num) { ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid]; ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid]; ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid]; temp_ct[tid*3] =0; temp_ct[tid*3+1] =0; temp_ct[tid*3+2] =0; ct_cnt[tid] = 0; } /* for (int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点,检查是否需要继续迭代 { if(result[j]==ct_idx_old[j]) { cnt=cnt++; __syncthreads(); result[j]=1; } else { result[j]=0; } }*/ for 
(int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点 { ct_idx_old[j]=result[j]; } inte++; } } /* for(int i=0;i<num;i+=1) //num个类 { while(p_num!=1) { if(p_num>512) { if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512]) { temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512]; idx_dist[j] = i*m+j+cnt*512; cnt++; p_num -=512; } } if(p_num<=512) { if(j<(p_num/2) { int stride = p_num/2; if(temp_dist[i*n+j]>temp_dist[i*m+j+stride]) { idx_dist[j] = i*m+j+stride; temp_dist[i*n+j]= = temp_dist[i*m+j+stride]; p_num = p_num/2; } } } } } */ void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result) { k_means_gpu<<<b,512,n*num*sizeof(float)+n*sizeof(int)+num*3*sizeof(float)+num*3*sizeof(float)+num*sizeof(int)>>>(b,n,num,xyz,init_xyz,result); }
.file "tmpxft_001b6a8b_00000000-6_cuda_test.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi .type _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi, @function _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi: .LFB2082: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movl %edi, 44(%rsp) movl %esi, 40(%rsp) movl %edx, 36(%rsp) movq %rcx, 24(%rsp) movq %r8, 16(%rsp) movq %r9, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z11k_means_gpuiiiPKfS0_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi, .-_Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi .globl 
_Z11k_means_gpuiiiPKfS0_Pi .type _Z11k_means_gpuiiiPKfS0_Pi, @function _Z11k_means_gpuiiiPKfS0_Pi: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z11k_means_gpuiiiPKfS0_Pi, .-_Z11k_means_gpuiiiPKfS0_Pi .globl _Z6kmeansiiiPKfS0_Pi .type _Z6kmeansiiiPKfS0_Pi, @function _Z6kmeansiiiPKfS0_Pi: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movl %edi, %r12d movl %esi, %ebp movl %edx, %ebx movq %rcx, %r13 movq %r8, %r14 movq %r9, %r15 movl $512, 20(%rsp) movl $1, 24(%rsp) movl %edi, 8(%rsp) movl $1, 12(%rsp) movl %esi, %eax imull %edx, %eax cltq movslq %esi, %rdx movslq %ebx, %rcx addq %rcx, %rdx addq %rdx, %rax leal (%rbx,%rbx,2), %edx movslq %edx, %rdx leaq (%rax,%rdx,2), %rax movl $0, %r9d leaq 0(,%rax,4), %r8 movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movq %r15, %r9 movq %r14, %r8 movq %r13, %rcx movl %ebx, %edx movl %ebp, %esi movl %r12d, %edi call _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi jmp .L11 .cfi_endproc .LFE2057: .size _Z6kmeansiiiPKfS0_Pi, .-_Z6kmeansiiiPKfS0_Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z11k_means_gpuiiiPKfS0_Pi" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11k_means_gpuiiiPKfS0_Pi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> __global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3) int batch_idx = blockIdx.x; xyz += batch_idx*n*3; init_xyz += batch_idx*num*3; result += batch_idx*n; float x_c,y_c,z_c,x,y,z; int inte = 0; extern __shared__ float s[]; float *temp_dist = s; int *ct_idx_old = (int*)&temp_dist[n*num]; float *ct_xyz =(float*)&ct_idx_old[n]; float *temp_ct = (float*)&ct_xyz[num*3]; int *ct_cnt = (int*)&temp_ct[num*3]; int tid = threadIdx.x; //test: num=2 n=1024 float min_dist = 1e8; if(tid<num) { ct_cnt[tid]=0; temp_ct[tid*3]=0; temp_ct[tid*3+1]=0; temp_ct[tid*3+2]=0; } if(tid<n) { ct_idx_old[tid]=0; } __syncthreads(); while(inte<3) { if(tid<num) { temp_ct[tid*3] += xyz[tid*3]; temp_ct[tid*3+1] += xyz[tid*3+1]; temp_ct[tid*3+2] += xyz[tid*3+2]; } for (int j=threadIdx.x;j<n;j+=blockDim.x) //一个点一个点处理 { x = xyz[j*3]; y = xyz[j*3+1]; z = xyz[j*3+2]; for(int i=0;i<num;i+=1) //获得第j个点与第i个中心点的距离 { if(inte == 0) { x_c = init_xyz[i*3]; y_c = init_xyz[i*3+1]; z_c = init_xyz[i*3+2]; } else { x_c = ct_xyz[i*3]; y_c = ct_xyz[i*3+1]; z_c = ct_xyz[i*3+2]; } temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c); if(temp_dist[j*num+i]<min_dist) { result[j] = i; min_dist = temp_dist[j*num+i]; } } // min_dist=1e8; } if(tid==0) { for(int ct=0;ct<n;ct++) { ct_cnt[result[ct]]++; temp_ct[result[ct]*3] += xyz[ct*3]; temp_ct[result[ct]*3+1] += xyz[ct*3+1]; temp_ct[result[ct]*3+2] += xyz[ct*3+2]; } } __syncthreads(); if(tid<num) { ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid]; ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid]; ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid]; temp_ct[tid*3] =0; temp_ct[tid*3+1] =0; temp_ct[tid*3+2] =0; ct_cnt[tid] = 0; } /* for (int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点,检查是否需要继续迭代 { if(result[j]==ct_idx_old[j]) { cnt=cnt++; __syncthreads(); result[j]=1; } else { result[j]=0; } }*/ for 
(int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点 { ct_idx_old[j]=result[j]; } inte++; } } /* for(int i=0;i<num;i+=1) //num个类 { while(p_num!=1) { if(p_num>512) { if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512]) { temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512]; idx_dist[j] = i*m+j+cnt*512; cnt++; p_num -=512; } } if(p_num<=512) { if(j<(p_num/2) { int stride = p_num/2; if(temp_dist[i*n+j]>temp_dist[i*m+j+stride]) { idx_dist[j] = i*m+j+stride; temp_dist[i*n+j]= = temp_dist[i*m+j+stride]; p_num = p_num/2; } } } } } */ void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result) { k_means_gpu<<<b,512,n*num*sizeof(float)+n*sizeof(int)+num*3*sizeof(float)+num*3*sizeof(float)+num*sizeof(int)>>>(b,n,num,xyz,init_xyz,result); }
#include "hip/hip_runtime.h" #include<stdio.h> __global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3) int batch_idx = blockIdx.x; xyz += batch_idx*n*3; init_xyz += batch_idx*num*3; result += batch_idx*n; float x_c,y_c,z_c,x,y,z; int inte = 0; extern __shared__ float s[]; float *temp_dist = s; int *ct_idx_old = (int*)&temp_dist[n*num]; float *ct_xyz =(float*)&ct_idx_old[n]; float *temp_ct = (float*)&ct_xyz[num*3]; int *ct_cnt = (int*)&temp_ct[num*3]; int tid = threadIdx.x; //test: num=2 n=1024 float min_dist = 1e8; if(tid<num) { ct_cnt[tid]=0; temp_ct[tid*3]=0; temp_ct[tid*3+1]=0; temp_ct[tid*3+2]=0; } if(tid<n) { ct_idx_old[tid]=0; } __syncthreads(); while(inte<3) { if(tid<num) { temp_ct[tid*3] += xyz[tid*3]; temp_ct[tid*3+1] += xyz[tid*3+1]; temp_ct[tid*3+2] += xyz[tid*3+2]; } for (int j=threadIdx.x;j<n;j+=blockDim.x) //一个点一个点处理 { x = xyz[j*3]; y = xyz[j*3+1]; z = xyz[j*3+2]; for(int i=0;i<num;i+=1) //获得第j个点与第i个中心点的距离 { if(inte == 0) { x_c = init_xyz[i*3]; y_c = init_xyz[i*3+1]; z_c = init_xyz[i*3+2]; } else { x_c = ct_xyz[i*3]; y_c = ct_xyz[i*3+1]; z_c = ct_xyz[i*3+2]; } temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c); if(temp_dist[j*num+i]<min_dist) { result[j] = i; min_dist = temp_dist[j*num+i]; } } // min_dist=1e8; } if(tid==0) { for(int ct=0;ct<n;ct++) { ct_cnt[result[ct]]++; temp_ct[result[ct]*3] += xyz[ct*3]; temp_ct[result[ct]*3+1] += xyz[ct*3+1]; temp_ct[result[ct]*3+2] += xyz[ct*3+2]; } } __syncthreads(); if(tid<num) { ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid]; ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid]; ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid]; temp_ct[tid*3] =0; temp_ct[tid*3+1] =0; temp_ct[tid*3+2] =0; ct_cnt[tid] = 0; } /* for (int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点,检查是否需要继续迭代 { if(result[j]==ct_idx_old[j]) { cnt=cnt++; __syncthreads(); result[j]=1; } else { result[j]=0; } }*/ for (int j=threadIdx.x;j<n;j+=blockDim.x) 
//遍历所有的点 { ct_idx_old[j]=result[j]; } inte++; } } /* for(int i=0;i<num;i+=1) //num个类 { while(p_num!=1) { if(p_num>512) { if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512]) { temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512]; idx_dist[j] = i*m+j+cnt*512; cnt++; p_num -=512; } } if(p_num<=512) { if(j<(p_num/2) { int stride = p_num/2; if(temp_dist[i*n+j]>temp_dist[i*m+j+stride]) { idx_dist[j] = i*m+j+stride; temp_dist[i*n+j]= = temp_dist[i*m+j+stride]; p_num = p_num/2; } } } } } */ void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result) { k_means_gpu<<<b,512,n*num*sizeof(float)+n*sizeof(int)+num*3*sizeof(float)+num*3*sizeof(float)+num*sizeof(int)>>>(b,n,num,xyz,init_xyz,result); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include "hip/hip_runtime.h" #include<stdio.h> __global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3) int batch_idx = blockIdx.x; xyz += batch_idx*n*3; init_xyz += batch_idx*num*3; result += batch_idx*n; float x_c,y_c,z_c,x,y,z; int inte = 0; extern __shared__ float s[]; float *temp_dist = s; int *ct_idx_old = (int*)&temp_dist[n*num]; float *ct_xyz =(float*)&ct_idx_old[n]; float *temp_ct = (float*)&ct_xyz[num*3]; int *ct_cnt = (int*)&temp_ct[num*3]; int tid = threadIdx.x; //test: num=2 n=1024 float min_dist = 1e8; if(tid<num) { ct_cnt[tid]=0; temp_ct[tid*3]=0; temp_ct[tid*3+1]=0; temp_ct[tid*3+2]=0; } if(tid<n) { ct_idx_old[tid]=0; } __syncthreads(); while(inte<3) { if(tid<num) { temp_ct[tid*3] += xyz[tid*3]; temp_ct[tid*3+1] += xyz[tid*3+1]; temp_ct[tid*3+2] += xyz[tid*3+2]; } for (int j=threadIdx.x;j<n;j+=blockDim.x) //一个点一个点处理 { x = xyz[j*3]; y = xyz[j*3+1]; z = xyz[j*3+2]; for(int i=0;i<num;i+=1) //获得第j个点与第i个中心点的距离 { if(inte == 0) { x_c = init_xyz[i*3]; y_c = init_xyz[i*3+1]; z_c = init_xyz[i*3+2]; } else { x_c = ct_xyz[i*3]; y_c = ct_xyz[i*3+1]; z_c = ct_xyz[i*3+2]; } temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c); if(temp_dist[j*num+i]<min_dist) { result[j] = i; min_dist = temp_dist[j*num+i]; } } // min_dist=1e8; } if(tid==0) { for(int ct=0;ct<n;ct++) { ct_cnt[result[ct]]++; temp_ct[result[ct]*3] += xyz[ct*3]; temp_ct[result[ct]*3+1] += xyz[ct*3+1]; temp_ct[result[ct]*3+2] += xyz[ct*3+2]; } } __syncthreads(); if(tid<num) { ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid]; ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid]; ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid]; temp_ct[tid*3] =0; temp_ct[tid*3+1] =0; temp_ct[tid*3+2] =0; ct_cnt[tid] = 0; } /* for (int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点,检查是否需要继续迭代 { if(result[j]==ct_idx_old[j]) { cnt=cnt++; __syncthreads(); result[j]=1; } else { result[j]=0; } }*/ for (int j=threadIdx.x;j<n;j+=blockDim.x) 
//遍历所有的点 { ct_idx_old[j]=result[j]; } inte++; } } /* for(int i=0;i<num;i+=1) //num个类 { while(p_num!=1) { if(p_num>512) { if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512]) { temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512]; idx_dist[j] = i*m+j+cnt*512; cnt++; p_num -=512; } } if(p_num<=512) { if(j<(p_num/2) { int stride = p_num/2; if(temp_dist[i*n+j]>temp_dist[i*m+j+stride]) { idx_dist[j] = i*m+j+stride; temp_dist[i*n+j]= = temp_dist[i*m+j+stride]; p_num = p_num/2; } } } } } */ void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result) { k_means_gpu<<<b,512,n*num*sizeof(float)+n*sizeof(int)+num*3*sizeof(float)+num*3*sizeof(float)+num*sizeof(int)>>>(b,n,num,xyz,init_xyz,result); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11k_means_gpuiiiPKfS0_Pi .globl _Z11k_means_gpuiiiPKfS0_Pi .p2align 8 .type _Z11k_means_gpuiiiPKfS0_Pi,@function _Z11k_means_gpuiiiPKfS0_Pi: s_load_b64 s[8:9], s[0:1], 0x4 s_waitcnt lgkmcnt(0) s_mul_i32 s14, s9, s8 s_lshl_b32 s4, s8, 2 s_lshl_b32 s2, s14, 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) s_add_i32 s3, s2, 0 v_cmp_gt_i32_e64 s2, s9, v0 s_add_i32 s18, s3, s4 s_mul_i32 s4, s9, 12 s_add_i32 s19, s18, s4 s_delay_alu instid0(SALU_CYCLE_1) s_add_i32 s20, s19, s4 s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB0_2 v_mul_u32_u24_e32 v1, 3, v0 v_lshl_add_u32 v2, v0, 2, s20 v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 0 v_mov_b32_e32 v5, 0 s_delay_alu instid0(VALU_DEP_4) v_lshl_add_u32 v1, v1, 2, s19 ds_store_b32 v2, v3 ds_store_2addr_b32 v1, v4, v5 offset1:1 ds_store_b32 v1, v3 offset:8 .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_delay_alu instid0(SALU_CYCLE_1) s_mov_b32 s4, exec_lo v_cmpx_gt_i32_e64 s8, v0 s_cbranch_execz .LBB0_4 v_lshl_add_u32 v1, v0, 2, s3 v_mov_b32_e32 v2, 0 ds_store_b32 v1, v2 .LBB0_4: s_or_b32 exec_lo, exec_lo, s4 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x10 s_load_b64 s[10:11], s[0:1], 0x20 s_mul_i32 s12, s15, s8 v_mul_u32_u24_e32 v1, 3, v0 s_mul_i32 s16, s12, 3 s_mul_i32 s13, s9, s15 s_ashr_i32 s17, s16, 31 s_mul_i32 s24, s13, 3 s_lshl_b64 s[16:17], s[16:17], 2 v_dual_mov_b32 v22, 0 :: v_dual_lshlrev_b32 v1, 2, v1 v_cmp_eq_u32_e32 vcc_lo, 0, v0 v_lshlrev_b32_e32 v10, 2, v0 v_cmp_gt_i32_e64 s3, s8, v0 s_delay_alu instid0(VALU_DEP_4) v_add_nc_u32_e32 v2, 4, v1 v_add_nc_u32_e32 v3, 8, v1 v_add_nc_u32_e32 v13, s19, v1 v_add_nc_u32_e32 v17, s18, v1 v_add_nc_u32_e32 v16, s20, v10 v_add_nc_u32_e32 v14, s19, v2 s_waitcnt lgkmcnt(0) s_add_u32 s21, s16, s4 s_addc_u32 s22, s17, s5 s_ashr_i32 s25, s24, 31 v_add_nc_u32_e32 v15, s19, v3 s_lshl_b64 s[4:5], s[24:25], 2 v_add_nc_u32_e32 v18, s18, v2 s_add_u32 s4, s6, s4 s_addc_u32 s5, s7, s5 s_ashr_i32 s13, s12, 31 
v_add_nc_u32_e32 v19, s18, v3 s_lshl_b64 s[6:7], s[12:13], 2 v_add_co_u32 v4, s12, s21, v1 s_add_u32 s6, s10, s6 v_add_co_u32 v6, s10, s21, v2 s_addc_u32 s7, s11, s7 v_add_co_ci_u32_e64 v7, null, s22, 0, s10 s_cmp_gt_i32 s9, 0 v_add_co_u32 v8, s10, s21, v3 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v9, null, s22, 0, s10 s_cselect_b32 s23, -1, 0 s_add_u32 s10, s0, 40 s_addc_u32 s11, s1, 0 s_cmp_gt_i32 s8, 0 v_mul_lo_u32 v1, v0, s9 s_cselect_b32 s0, -1, 0 s_lshl_b32 s25, s9, 2 s_and_b32 s24, vcc_lo, s0 v_add_co_ci_u32_e64 v5, null, s22, 0, s12 s_add_u32 s12, s21, 4 s_addc_u32 s13, s22, 0 s_lshl_b32 s0, s14, 2 v_lshl_add_u32 v20, v1, 2, 0 v_add3_u32 v21, 0, s0, v10 s_mov_b32 s26, 0 s_mov_b64 s[14:15], src_shared_base s_barrier buffer_gl0_inv s_branch .LBB0_6 .LBB0_5: s_or_b32 exec_lo, exec_lo, s0 s_add_i32 s26, s26, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s26, 3 s_cbranch_scc1 .LBB0_24 .LBB0_6: s_and_saveexec_b32 s0, s2 s_cbranch_execz .LBB0_8 s_clause 0x2 global_load_b32 v1, v[4:5], off global_load_b32 v2, v[6:7], off global_load_b32 v3, v[8:9], off ds_load_b32 v10, v13 ds_load_b32 v11, v14 ds_load_b32 v12, v15 s_waitcnt vmcnt(1) lgkmcnt(1) v_dual_add_f32 v1, v1, v10 :: v_dual_add_f32 v2, v2, v11 s_waitcnt vmcnt(0) lgkmcnt(0) v_add_f32_e32 v3, v3, v12 ds_store_b32 v13, v1 ds_store_b32 v14, v2 ds_store_b32 v15, v3 .LBB0_8: s_or_b32 exec_lo, exec_lo, s0 s_and_saveexec_b32 s14, s3 s_cbranch_execz .LBB0_16 s_load_b32 s0, s[10:11], 0xc s_cmp_eq_u32 s26, 0 v_mov_b32_e32 v23, v20 v_mov_b32_e32 v10, v0 s_cselect_b32 s16, -1, 0 s_mov_b32 s27, 0 s_waitcnt lgkmcnt(0) s_and_b32 s17, s0, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_mul_i32 s28, s25, s17 s_branch .LBB0_11 .LBB0_10: s_set_inst_prefetch_distance 0x2 v_add_nc_u32_e32 v10, s17, v10 v_add_nc_u32_e32 v23, s28, v23 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_le_i32_e32 vcc_lo, s8, v10 s_or_b32 s27, vcc_lo, s27 s_and_not1_b32 exec_lo, exec_lo, s27 s_cbranch_execz 
.LBB0_16 .LBB0_11: s_and_not1_b32 vcc_lo, exec_lo, s23 s_cbranch_vccnz .LBB0_10 v_lshl_add_u32 v1, v10, 1, v10 v_ashrrev_i32_e32 v11, 31, v10 v_dual_mov_b32 v25, 0x4cbebc20 :: v_dual_mov_b32 v24, v23 s_mov_b32 s29, 0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[11:12], 2, v[10:11] s_mov_b64 s[0:1], s[4:5] s_mov_b32 s30, s18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] v_add_co_u32 v1, vcc_lo, s21, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s22, v2, vcc_lo v_add_co_u32 v11, vcc_lo, s6, v11 v_add_co_ci_u32_e32 v12, vcc_lo, s7, v12, vcc_lo global_load_b96 v[1:3], v[1:2], off s_set_inst_prefetch_distance 0x1 s_branch .LBB0_14 .p2align 6 .LBB0_13: s_or_b32 exec_lo, exec_lo, s31 s_add_i32 s29, s29, 1 s_add_i32 s30, s30, 12 v_add_nc_u32_e32 v24, 4, v24 s_add_u32 s0, s0, 12 s_addc_u32 s1, s1, 0 s_cmp_eq_u32 s9, s29 s_cbranch_scc1 .LBB0_10 .LBB0_14: s_cmp_lg_u32 s30, -1 s_cselect_b32 s31, s30, 0 s_cselect_b32 s33, s15, 0 s_and_b32 s34, s16, exec_lo s_cselect_b32 s35, s1, s33 s_cselect_b32 s34, s0, s31 s_mov_b32 s31, exec_lo v_dual_mov_b32 v26, s34 :: v_dual_mov_b32 v27, s35 flat_load_b96 v[26:28], v[26:27] s_waitcnt vmcnt(0) lgkmcnt(0) v_sub_f32_e32 v29, v1, v26 v_sub_f32_e32 v27, v2, v27 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v26, v27, v27 v_dual_sub_f32 v27, v3, v28 :: v_dual_fmac_f32 v26, v29, v29 s_delay_alu instid0(VALU_DEP_1) v_fmac_f32_e32 v26, v27, v27 ds_store_b32 v24, v26 v_cmpx_lt_f32_e32 v26, v25 s_cbranch_execz .LBB0_13 v_mov_b32_e32 v27, s29 v_mov_b32_e32 v25, v26 global_store_b32 v[11:12], v27, off s_branch .LBB0_13 .LBB0_16: s_or_b32 exec_lo, exec_lo, s14 s_and_saveexec_b32 s14, s24 s_cbranch_execz .LBB0_19 s_mov_b64 s[0:1], s[6:7] s_mov_b64 s[16:17], s[12:13] s_mov_b32 s27, s8 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_18: s_clause 0x2 global_load_b32 v3, 
v22, s[0:1] global_load_b32 v12, v22, s[16:17] offset:-4 global_load_b64 v[1:2], v22, s[16:17] s_add_i32 s27, s27, -1 s_add_u32 s16, s16, 12 s_addc_u32 s17, s17, 0 s_add_u32 s0, s0, 4 s_addc_u32 s1, s1, 0 s_cmp_lg_u32 s27, 0 s_waitcnt vmcnt(2) v_mul_lo_u32 v10, v3, 12 v_lshlrev_b32_e32 v3, 2, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v3, s20, v3 v_add_nc_u32_e32 v23, s19, v10 ds_load_b32 v24, v3 ds_load_2addr_b32 v[10:11], v23 offset1:1 ds_load_b32 v25, v23 offset:8 s_waitcnt vmcnt(0) lgkmcnt(1) v_dual_add_f32 v1, v1, v11 :: v_dual_add_nc_u32 v24, 1, v24 v_add_f32_e32 v10, v12, v10 s_waitcnt lgkmcnt(0) v_add_f32_e32 v2, v2, v25 ds_store_2addr_b32 v23, v10, v1 offset1:1 ds_store_b32 v3, v24 ds_store_b32 v23, v2 offset:8 s_cbranch_scc1 .LBB0_18 .LBB0_19: s_set_inst_prefetch_distance 0x2 s_or_b32 exec_lo, exec_lo, s14 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_and_saveexec_b32 s14, s2 s_cbranch_execz .LBB0_21 ds_load_b32 v1, v16 ds_load_b32 v2, v13 ds_load_b32 v3, v14 ds_load_b32 v10, v15 s_waitcnt lgkmcnt(3) v_cvt_f32_i32_e32 v1, v1 s_waitcnt lgkmcnt(2) s_delay_alu instid0(VALU_DEP_1) v_div_scale_f32 v11, null, v1, v1, v2 s_waitcnt lgkmcnt(1) v_div_scale_f32 v12, null, v1, v1, v3 s_waitcnt lgkmcnt(0) v_div_scale_f32 v23, null, v1, v1, v10 v_rcp_f32_e32 v24, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_rcp_f32_e32 v25, v12 v_div_scale_f32 v27, vcc_lo, v2, v1, v2 v_rcp_f32_e32 v26, v23 v_div_scale_f32 v28, s0, v3, v1, v3 v_div_scale_f32 v32, s1, v10, v1, v10 s_delay_alu instid0(TRANS32_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_fma_f32 v29, -v11, v24, 1.0 s_waitcnt_depctr 0xfff v_fma_f32 v30, -v12, v25, 1.0 v_fma_f32 v31, -v23, v26, 1.0 v_dual_fmac_f32 v24, v29, v24 :: v_dual_fmac_f32 v25, v30, v25 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mul_f32 v29, v27, v24 :: v_dual_mul_f32 v30, v28, v25 v_fma_f32 v33, -v11, 
v29, v27 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_fmac_f32_e32 v26, v31, v26 v_fma_f32 v34, -v12, v30, v28 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmac_f32_e32 v29, v33, v24 v_dual_mul_f32 v31, v32, v26 :: v_dual_fmac_f32 v30, v34, v25 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v11, -v11, v29, v27 v_fma_f32 v35, -v23, v31, v32 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_fma_f32 v12, -v12, v30, v28 v_div_fmas_f32 v11, v11, v24, v29 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fmac_f32_e32 v31, v35, v26 s_mov_b32 vcc_lo, s0 v_div_fmas_f32 v12, v12, v25, v30 s_mov_b32 vcc_lo, s1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fma_f32 v23, -v23, v31, v32 v_div_fixup_f32 v2, v11, v1, v2 v_div_fixup_f32 v3, v12, v1, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_div_fmas_f32 v23, v23, v26, v31 v_div_fixup_f32 v1, v23, v1, v10 ds_store_b32 v17, v2 ds_store_b32 v13, v22 ds_store_b32 v18, v3 ds_store_b32 v19, v1 ds_store_b32 v15, v22 ds_store_b32 v14, v22 ds_store_b32 v16, v22 .LBB0_21: s_or_b32 exec_lo, exec_lo, s14 s_and_saveexec_b32 s0, s3 s_cbranch_execz .LBB0_5 s_load_b32 s1, s[10:11], 0xc v_mov_b32_e32 v3, v21 v_mov_b32_e32 v1, v0 s_mov_b32 s16, 0 s_waitcnt lgkmcnt(0) s_and_b32 s1, s1, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b32 s14, s1, 2 .p2align 6 .LBB0_23: v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[10:11], 2, v[1:2] v_add_nc_u32_e32 v1, s1, v1 v_add_co_u32 v10, vcc_lo, s6, v10 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo v_cmp_le_i32_e32 vcc_lo, s8, v1 global_load_b32 v2, v[10:11], off s_or_b32 s16, vcc_lo, s16 s_waitcnt vmcnt(0) ds_store_b32 v3, v2 v_add_nc_u32_e32 v3, s14, v3 s_and_not1_b32 
exec_lo, exec_lo, s16 s_cbranch_execnz .LBB0_23 s_branch .LBB0_5 .LBB0_24: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11k_means_gpuiiiPKfS0_Pi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 36 .amdhsa_next_free_sgpr 36 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11k_means_gpuiiiPKfS0_Pi, .Lfunc_end0-_Z11k_means_gpuiiiPKfS0_Pi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .offset: 4 
.size: 4 .value_kind: by_value - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims - .offset: 160 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11k_means_gpuiiiPKfS0_Pi .private_segment_fixed_size: 0 .sgpr_count: 38 .sgpr_spill_count: 0 .symbol: _Z11k_means_gpuiiiPKfS0_Pi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 36 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" #include<stdio.h> __global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3) int batch_idx = blockIdx.x; xyz += batch_idx*n*3; init_xyz += batch_idx*num*3; result += batch_idx*n; float x_c,y_c,z_c,x,y,z; int inte = 0; extern __shared__ float s[]; float *temp_dist = s; int *ct_idx_old = (int*)&temp_dist[n*num]; float *ct_xyz =(float*)&ct_idx_old[n]; float *temp_ct = (float*)&ct_xyz[num*3]; int *ct_cnt = (int*)&temp_ct[num*3]; int tid = threadIdx.x; //test: num=2 n=1024 float min_dist = 1e8; if(tid<num) { ct_cnt[tid]=0; temp_ct[tid*3]=0; temp_ct[tid*3+1]=0; temp_ct[tid*3+2]=0; } if(tid<n) { ct_idx_old[tid]=0; } __syncthreads(); while(inte<3) { if(tid<num) { temp_ct[tid*3] += xyz[tid*3]; temp_ct[tid*3+1] += xyz[tid*3+1]; temp_ct[tid*3+2] += xyz[tid*3+2]; } for (int j=threadIdx.x;j<n;j+=blockDim.x) //一个点一个点处理 { x = xyz[j*3]; y = xyz[j*3+1]; z = xyz[j*3+2]; for(int i=0;i<num;i+=1) //获得第j个点与第i个中心点的距离 { if(inte == 0) { x_c = init_xyz[i*3]; y_c = init_xyz[i*3+1]; z_c = init_xyz[i*3+2]; } else { x_c = ct_xyz[i*3]; y_c = ct_xyz[i*3+1]; z_c = ct_xyz[i*3+2]; } temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c); if(temp_dist[j*num+i]<min_dist) { result[j] = i; min_dist = temp_dist[j*num+i]; } } // min_dist=1e8; } if(tid==0) { for(int ct=0;ct<n;ct++) { ct_cnt[result[ct]]++; temp_ct[result[ct]*3] += xyz[ct*3]; temp_ct[result[ct]*3+1] += xyz[ct*3+1]; temp_ct[result[ct]*3+2] += xyz[ct*3+2]; } } __syncthreads(); if(tid<num) { ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid]; ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid]; ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid]; temp_ct[tid*3] =0; temp_ct[tid*3+1] =0; temp_ct[tid*3+2] =0; ct_cnt[tid] = 0; } /* for (int j=threadIdx.x;j<n;j+=blockDim.x) //遍历所有的点,检查是否需要继续迭代 { if(result[j]==ct_idx_old[j]) { cnt=cnt++; __syncthreads(); result[j]=1; } else { result[j]=0; } }*/ for (int j=threadIdx.x;j<n;j+=blockDim.x) 
//遍历所有的点 { ct_idx_old[j]=result[j]; } inte++; } } /* for(int i=0;i<num;i+=1) //num个类 { while(p_num!=1) { if(p_num>512) { if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512]) { temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512]; idx_dist[j] = i*m+j+cnt*512; cnt++; p_num -=512; } } if(p_num<=512) { if(j<(p_num/2) { int stride = p_num/2; if(temp_dist[i*n+j]>temp_dist[i*m+j+stride]) { idx_dist[j] = i*m+j+stride; temp_dist[i*n+j]= = temp_dist[i*m+j+stride]; p_num = p_num/2; } } } } } */ void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result) { k_means_gpu<<<b,512,n*num*sizeof(float)+n*sizeof(int)+num*3*sizeof(float)+num*3*sizeof(float)+num*sizeof(int)>>>(b,n,num,xyz,init_xyz,result); }
.text .file "cuda_test.hip" .globl _Z26__device_stub__k_means_gpuiiiPKfS0_Pi # -- Begin function _Z26__device_stub__k_means_gpuiiiPKfS0_Pi .p2align 4, 0x90 .type _Z26__device_stub__k_means_gpuiiiPKfS0_Pi,@function _Z26__device_stub__k_means_gpuiiiPKfS0_Pi: # @_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 20(%rsp) movl %esi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, 88(%rsp) movq %r8, 80(%rsp) movq %r9, 72(%rsp) leaq 20(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11k_means_gpuiiiPKfS0_Pi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z26__device_stub__k_means_gpuiiiPKfS0_Pi, .Lfunc_end0-_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .cfi_endproc # -- End function .globl _Z6kmeansiiiPKfS0_Pi # -- Begin function _Z6kmeansiiiPKfS0_Pi .p2align 4, 0x90 .type _Z6kmeansiiiPKfS0_Pi,@function _Z6kmeansiiiPKfS0_Pi: # @_Z6kmeansiiiPKfS0_Pi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %r9, %rbx movq %r8, %r14 movq %rcx, %r15 movl %edx, %r12d movl %esi, %ebp movl %edi, %r13d movl %edx, %eax imull %esi, %eax cltq movslq %esi, %rcx 
leal (%r12,%r12,2), %edx movslq %edx, %rdx shlq $3, %rdx movslq %r12d, %rsi addq %rcx, %rsi addq %rax, %rsi leaq (%rdx,%rsi,4), %r8 movl %edi, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movl %r13d, 20(%rsp) movl %ebp, 16(%rsp) movl %r12d, 12(%rsp) movq %r15, 88(%rsp) movq %r14, 80(%rsp) movq %rbx, 72(%rsp) leaq 20(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11k_means_gpuiiiPKfS0_Pi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z6kmeansiiiPKfS0_Pi, .Lfunc_end1-_Z6kmeansiiiPKfS0_Pi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11k_means_gpuiiiPKfS0_Pi, %esi movl $.L__unnamed_1, %edx movl 
$.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z11k_means_gpuiiiPKfS0_Pi,@object # @_Z11k_means_gpuiiiPKfS0_Pi .section .rodata,"a",@progbits .globl _Z11k_means_gpuiiiPKfS0_Pi .p2align 3, 0x0 _Z11k_means_gpuiiiPKfS0_Pi: .quad _Z26__device_stub__k_means_gpuiiiPKfS0_Pi .size _Z11k_means_gpuiiiPKfS0_Pi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11k_means_gpuiiiPKfS0_Pi" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11k_means_gpuiiiPKfS0_Pi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b6a8b_00000000-6_cuda_test.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi .type _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi, @function _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi: .LFB2082: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movl %edi, 44(%rsp) movl %esi, 40(%rsp) movl %edx, 36(%rsp) movq %rcx, 24(%rsp) movq %r8, 16(%rsp) movq %r9, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rax movq %rax, 120(%rsp) leaq 36(%rsp), %rax movq %rax, 128(%rsp) leaq 24(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z11k_means_gpuiiiPKfS0_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi, .-_Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi .globl 
_Z11k_means_gpuiiiPKfS0_Pi .type _Z11k_means_gpuiiiPKfS0_Pi, @function _Z11k_means_gpuiiiPKfS0_Pi: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z11k_means_gpuiiiPKfS0_Pi, .-_Z11k_means_gpuiiiPKfS0_Pi .globl _Z6kmeansiiiPKfS0_Pi .type _Z6kmeansiiiPKfS0_Pi, @function _Z6kmeansiiiPKfS0_Pi: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movl %edi, %r12d movl %esi, %ebp movl %edx, %ebx movq %rcx, %r13 movq %r8, %r14 movq %r9, %r15 movl $512, 20(%rsp) movl $1, 24(%rsp) movl %edi, 8(%rsp) movl $1, 12(%rsp) movl %esi, %eax imull %edx, %eax cltq movslq %esi, %rdx movslq %ebx, %rcx addq %rcx, %rdx addq %rdx, %rax leal (%rbx,%rbx,2), %edx movslq %edx, %rdx leaq (%rax,%rdx,2), %rax movl $0, %r9d leaq 0(,%rax,4), %r8 movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movq %r15, %r9 movq %r14, %r8 movq %r13, %rcx movl %ebx, %edx movl %ebp, %esi movl %r12d, %edi call _Z40__device_stub__Z11k_means_gpuiiiPKfS0_PiiiiPKfS0_Pi jmp .L11 .cfi_endproc .LFE2057: .size _Z6kmeansiiiPKfS0_Pi, .-_Z6kmeansiiiPKfS0_Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z11k_means_gpuiiiPKfS0_Pi" .text .type 
_ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11k_means_gpuiiiPKfS0_Pi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda_test.hip" .globl _Z26__device_stub__k_means_gpuiiiPKfS0_Pi # -- Begin function _Z26__device_stub__k_means_gpuiiiPKfS0_Pi .p2align 4, 0x90 .type _Z26__device_stub__k_means_gpuiiiPKfS0_Pi,@function _Z26__device_stub__k_means_gpuiiiPKfS0_Pi: # @_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 20(%rsp) movl %esi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, 88(%rsp) movq %r8, 80(%rsp) movq %r9, 72(%rsp) leaq 20(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11k_means_gpuiiiPKfS0_Pi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z26__device_stub__k_means_gpuiiiPKfS0_Pi, .Lfunc_end0-_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .cfi_endproc # -- End function .globl _Z6kmeansiiiPKfS0_Pi # -- Begin function _Z6kmeansiiiPKfS0_Pi .p2align 4, 0x90 .type _Z6kmeansiiiPKfS0_Pi,@function _Z6kmeansiiiPKfS0_Pi: # @_Z6kmeansiiiPKfS0_Pi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %r9, %rbx movq %r8, %r14 movq %rcx, %r15 movl %edx, %r12d movl %esi, %ebp movl %edi, %r13d movl %edx, %eax imull %esi, %eax cltq movslq %esi, %rcx 
leal (%r12,%r12,2), %edx movslq %edx, %rdx shlq $3, %rdx movslq %r12d, %rsi addq %rcx, %rsi addq %rax, %rsi leaq (%rdx,%rsi,4), %r8 movl %edi, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $512, %rdx # imm = 0x200 movl $1, %esi movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movl %r13d, 20(%rsp) movl %ebp, 16(%rsp) movl %r12d, 12(%rsp) movq %r15, 88(%rsp) movq %r14, 80(%rsp) movq %rbx, 72(%rsp) leaq 20(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 88(%rsp), %rax movq %rax, 120(%rsp) leaq 80(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z11k_means_gpuiiiPKfS0_Pi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z6kmeansiiiPKfS0_Pi, .Lfunc_end1-_Z6kmeansiiiPKfS0_Pi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11k_means_gpuiiiPKfS0_Pi, %esi movl $.L__unnamed_1, %edx movl 
$.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z11k_means_gpuiiiPKfS0_Pi,@object # @_Z11k_means_gpuiiiPKfS0_Pi .section .rodata,"a",@progbits .globl _Z11k_means_gpuiiiPKfS0_Pi .p2align 3, 0x0 _Z11k_means_gpuiiiPKfS0_Pi: .quad _Z26__device_stub__k_means_gpuiiiPKfS0_Pi .size _Z11k_means_gpuiiiPKfS0_Pi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11k_means_gpuiiiPKfS0_Pi" .size .L__unnamed_1, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
_Z26__device_stub__k_means_gpuiiiPKfS0_Pi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11k_means_gpuiiiPKfS0_Pi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; cudaMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; cudaMemcpy(gp, p, v.size(), cudaMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); cudaMemcpy(p, gp, v.size(), cudaMemcpyDeviceToHost); cudaFree(gp); } return 0; }
code for sm_80 Function : _Z7gpu_mulPfS_S_mm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff077624 */ /* 0x000fe400078e00ff */ /*0030*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */ /* 0x000fe200078e00ff */ /*0040*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */ /* 0x000e620000002600 */ /*0050*/ IMAD R0, R7, c[0x0][0x17c], RZ ; /* 0x00005f0007007a24 */ /* 0x000fc800078e02ff */ /*0060*/ IMAD R9, R7.reuse, c[0x0][0x17c], R0 ; /* 0x00005f0007097a24 */ /* 0x040fe400078e0200 */ /*0070*/ IMAD.WIDE.U32 R6, R7, c[0x0][0x178], RZ ; /* 0x00005e0007067a25 */ /* 0x000fc800078e00ff */ /*0080*/ IMAD.IADD R0, R7, 0x1, R9 ; /* 0x0000000107007824 */ /* 0x000fe200078e0209 */ /*0090*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x001fe20003f26070 */ /*00a0*/ IMAD.WIDE.U32 R2, R11.reuse, c[0x0][0x178], R4 ; /* 0x00005e000b027a25 */ /* 0x042fe200078e0004 */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x178], PT ; /* 0x00005e000b007a0c */ /* 0x000fc60003f06070 */ /*00c0*/ IMAD R13, R11, c[0x0][0x17c], R3 ; /* 0x00005f000b0d7a24 */ /* 0x000fe200078e0203 */ /*00d0*/ ISETP.GE.U32.AND P2, PT, R2, R6, PT ; /* 0x000000060200720c */ /* 0x000fc80003f46070 */ /*00e0*/ ISETP.GE.U32.AND.EX P2, PT, R13, R0, PT, P2 ; /* 0x000000000d00720c */ /* 0x000fc80003f46120 */ /*00f0*/ ISETP.GE.U32.OR.EX P1, PT, RZ, c[0x0][0x17c], P2, P1 ; /* 0x00005f00ff007a0c */ /* 0x000fc80001726510 */ /*0100*/ ISETP.GE.U32.OR.EX P0, PT, RZ, c[0x0][0x17c], P1, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fda0000f06500 */ /*0110*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0120*/ LEA R8, P0, R4.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a0004087a11 */ /* 0x040fe200078010ff 
*/ /*0130*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0140*/ LEA R6, P1, R11.reuse, c[0x0][0x170], 0x2 ; /* 0x00005c000b067a11 */ /* 0x040fe400078210ff */ /*0150*/ LEA.HI.X R9, R4, c[0x0][0x16c], RZ, 0x2, P0 ; /* 0x00005b0004097a11 */ /* 0x000fe400000f14ff */ /*0160*/ LEA.HI.X R7, R11, c[0x0][0x174], RZ, 0x2, P1 ; /* 0x00005d000b077a11 */ /* 0x000fc800008f14ff */ /*0170*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */ /* 0x000ea8000c1e1900 */ /*0180*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1900 */ /*0190*/ LEA R4, P0, R2, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */ /* 0x000fc800078010ff */ /*01a0*/ LEA.HI.X R5, R2, c[0x0][0x164], R13, 0x2, P0 ; /* 0x0000590002057a11 */ /* 0x000fe200000f140d */ /*01b0*/ FMUL R3, R6, R9 ; /* 0x0000000906037220 */ /* 0x004fca0000400000 */ /*01c0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x000fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; cudaMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; cudaMemcpy(gp, p, v.size(), cudaMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); cudaMemcpy(p, gp, v.size(), cudaMemcpyDeviceToHost); cudaFree(gp); } return 0; }
.file "tmpxft_000b43d9_00000000-6_gmain.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2911: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2911: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7cpu_mulPfS_S_mm .type _Z7cpu_mulPfS_S_mm, @function _Z7cpu_mulPfS_S_mm: .LFB2896: .cfi_startproc endbr64 testq %r8, %r8 je .L3 leaq 0(,%rcx,4), %r10 movl $0, %r9d jmp .L5 .L6: movss (%rsi,%rax,4), %xmm0 mulss (%rdx,%rax,4), %xmm0 movss %xmm0, (%rdi,%rax,4) movss 4(%rsi,%rax,4), %xmm0 mulss 4(%rdx,%rax,4), %xmm0 movss %xmm0, 4(%rdi,%rax,4) movss 8(%rsi,%rax,4), %xmm0 mulss 8(%rdx,%rax,4), %xmm0 movss %xmm0, 8(%rdi,%rax,4) movss 12(%rsi,%rax,4), %xmm0 mulss 12(%rdx,%rax,4), %xmm0 movss %xmm0, 12(%rdi,%rax,4) addq $4, %rax cmpq %rcx, %rax jb .L6 .L7: addq %r10, %rsi addq %r10, %rdi addq $1, %r9 cmpq %r9, %r8 je .L3 .L5: movl $0, %eax testq %rcx, %rcx jne .L6 jmp .L7 .L3: ret .cfi_endproc .LFE2896: .size _Z7cpu_mulPfS_S_mm, .-_Z7cpu_mulPfS_S_mm .globl _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .type _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm, @function _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm: .LFB2933: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call 
__cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 152(%rsp), %rax subq %fs:40, %rax jne .L16 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7gpu_mulPfS_S_mm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2933: .size _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm, .-_Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .globl _Z7gpu_mulPfS_S_mm .type _Z7gpu_mulPfS_S_mm, @function _Z7gpu_mulPfS_S_mm: .LFB2934: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2934: .size _Z7gpu_mulPfS_S_mm, .-_Z7gpu_mulPfS_S_mm .globl main .type main, @function main: .LFB2897: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA2897 endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $800040000, %edi .LEHB0: call _Znwm@PLT .LEHE0: movq %rax, %rbx movl $0x00000000, (%rax) movl $0, %eax .L20: addq $4, %rax cmpq $800040000, %rax je .L34 movl $0x00000000, (%rbx,%rax) jmp .L20 .L34: movl $0, %eax .L22: addq $1, %rax cmpq $200009999, %rax jbe .L22 leaq 8(%rsp), %rdi movl $200010000, %esi .LEHB1: call cudaMalloc@PLT movq 8(%rsp), %rbp movl $1, %ecx movl $200010000, %edx movq %rbx, %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl $10000, 16(%rsp) movl $10000, 20(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L23 leaq 40000(%rbp), 
%rsi leaq 400040000(%rbp), %rdi movl $10000, %r8d movl $10000, %ecx movq %rbp, %rdx call _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .L23: movl $2, %ecx movl $200010000, %edx movq 8(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT .LEHE1: movl $800040000, %esi movq %rbx, %rdi call _ZdlPvm@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L30: .cfi_restore_state endbr64 movq %rax, %rbp .L27: movq 40(%rsp), %rax subq %fs:40, %rax je .L25 call __stack_chk_fail@PLT .L25: movq %rbp, %rdi .LEHB2: call _Unwind_Resume@PLT .LEHE2: .L29: endbr64 movq %rax, %rbp movl $800040000, %esi movq %rbx, %rdi call _ZdlPvm@PLT jmp .L27 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2897: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA2897: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE2897-.LLSDACSB2897 .LLSDACSB2897: .uleb128 .LEHB0-.LFB2897 .uleb128 .LEHE0-.LEHB0 .uleb128 .L30-.LFB2897 .uleb128 0 .uleb128 .LEHB1-.LFB2897 .uleb128 .LEHE1-.LEHB1 .uleb128 .L29-.LFB2897 .uleb128 0 .uleb128 .LEHB2-.LFB2897 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .LLSDACSE2897: .text .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z7gpu_mulPfS_S_mm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2936: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z7gpu_mulPfS_S_mm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), 
%rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2936: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; cudaMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; cudaMemcpy(gp, p, v.size(), cudaMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); cudaMemcpy(p, gp, v.size(), cudaMemcpyDeviceToHost); cudaFree(gp); } return 0; }
#include <hip/hip_runtime.h> #include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; hipMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; hipMemcpy(gp, p, v.size(), hipMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); hipMemcpy(p, gp, v.size(), hipMemcpyDeviceToHost); hipFree(gp); } return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; hipMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; hipMemcpy(gp, p, v.size(), hipMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); hipMemcpy(p, gp, v.size(), hipMemcpyDeviceToHost); hipFree(gp); } return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7gpu_mulPfS_S_mm .globl _Z7gpu_mulPfS_S_mm .p2align 8 .type _Z7gpu_mulPfS_S_mm,@function _Z7gpu_mulPfS_S_mm: s_load_b64 s[6:7], s[0:1], 0x18 s_mov_b32 s2, s15 s_mov_b32 s15, 0 s_waitcnt lgkmcnt(0) s_mul_i32 s3, s2, s7 s_mul_hi_u32 s5, s2, s6 s_mul_i32 s4, s2, s6 s_mul_i32 s11, s6, s7 s_mul_hi_u32 s10, s6, s6 s_add_i32 s5, s5, s3 s_add_u32 s8, s4, s14 s_addc_u32 s9, s5, 0 s_add_i32 s3, s10, s11 s_mul_i32 s10, s6, s6 s_add_i32 s11, s3, s11 s_mov_b32 s3, s15 v_cmp_ge_u64_e64 s8, s[8:9], s[10:11] v_cmp_ge_u64_e64 s9, s[14:15], s[6:7] v_cmp_ge_u64_e64 s6, s[2:3], s[6:7] s_delay_alu instid0(VALU_DEP_2) s_or_b32 s7, s9, s8 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) s_or_b32 s6, s6, s7 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 vcc_lo, exec_lo, s6 s_cbranch_vccnz .LBB0_2 s_clause 0x1 s_load_b128 s[8:11], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_lshl_b64 s[6:7], s[14:15], 2 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) s_add_u32 s10, s10, s6 s_addc_u32 s11, s11, s7 s_lshl_b64 s[2:3], s[2:3], 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 s_load_b32 s2, s[10:11], 0x0 s_load_b32 s3, s[0:1], 0x0 s_lshl_b64 s[0:1], s[4:5], 2 s_add_u32 s0, s8, s0 s_addc_u32 s1, s9, s1 s_add_u32 s0, s0, s6 s_addc_u32 s1, s1, s7 s_waitcnt lgkmcnt(0) v_mul_f32_e64 v0, s2, s3 global_store_b32 v1, v0, s[0:1] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7gpu_mulPfS_S_mm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 40 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7gpu_mulPfS_S_mm, .Lfunc_end0-_Z7gpu_mulPfS_S_mm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 40 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7gpu_mulPfS_S_mm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7gpu_mulPfS_S_mm.kd .uniform_work_group_size: 1 
.uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <vector> using Float = float; __global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { auto val_ind = blockIdx.x + blockIdx.y * N; auto wag_ind = blockIdx.x; auto inn_ind = blockIdx.y; if (val_ind < N*N and wag_ind < N and inn_ind < N) { val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y]; } } void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M) { Float * row_wag = wag; Float * row_val = val; for (size_t j = 0; j < M; ++j) { for (size_t i = 0; i < N; i += 4) { row_val[i] = row_wag[i] * inn[i]; row_val[i+1] = row_wag[i+1] * inn[i+1]; row_val[i+2] = row_wag[i+2] * inn[i+2]; row_val[i+3] = row_wag[i+3] * inn[i+3]; } row_wag += N; row_val += N; } } int main() { size_t N = 10000; size_t M = 10000; std::vector<Float> v; v.resize(N + N*M + N*M); Float * p = &v[0]; Float * inn = p; Float * wag = p + N; Float * val = p+N+M*N; for (size_t i = 0; i < v.size(); ++i) { //v[i] = (int8_t)randm::uniform_f(-1, +1); } // cpu if (0) { cpu_mul(val, wag, inn, N, M); } // gpu if (1) { Float * gp; hipMalloc(&gp, N + N*M + N*M); Float * gpu_inn = gp; Float * gpu_wag = gp + N; Float * gpu_val = gp+N+M*N; hipMemcpy(gp, p, v.size(), hipMemcpyHostToDevice); dim3 threadsPerBlock(N,M,1); gpu_mul<<<1, threadsPerBlock>>>(gpu_val, gpu_wag, gpu_inn, N, M); hipMemcpy(p, gp, v.size(), hipMemcpyDeviceToHost); hipFree(gp); } return 0; }
.text .file "gmain.hip" .globl _Z22__device_stub__gpu_mulPfS_S_mm # -- Begin function _Z22__device_stub__gpu_mulPfS_S_mm .p2align 4, 0x90 .type _Z22__device_stub__gpu_mulPfS_S_mm,@function _Z22__device_stub__gpu_mulPfS_S_mm: # @_Z22__device_stub__gpu_mulPfS_S_mm .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7gpu_mulPfS_S_mm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z22__device_stub__gpu_mulPfS_S_mm, .Lfunc_end0-_Z22__device_stub__gpu_mulPfS_S_mm .cfi_endproc # -- End function .globl _Z7cpu_mulPfS_S_mm # -- Begin function _Z7cpu_mulPfS_S_mm .p2align 4, 0x90 .type _Z7cpu_mulPfS_S_mm,@function _Z7cpu_mulPfS_S_mm: # @_Z7cpu_mulPfS_S_mm .cfi_startproc # %bb.0: testq %r8, %r8 je .LBB1_6 # %bb.1: # %.preheader.lr.ph leaq (,%rcx,4), %rax xorl %r9d, %r9d jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %r9 addq %rax, %rdi addq %rax, %rsi cmpq %r8, %r9 je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 testq %rcx, %rcx je .LBB1_5 # %bb.3: # %.lr.ph.preheader # in Loop: Header=BB1_2 Depth=1 xorl %r10d, %r10d .p2align 4, 0x90 .LBB1_4: # %.lr.ph # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss (%rdx,%r10,4), %xmm0 movss %xmm0, (%rdi,%r10,4) movss 
4(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 4(%rdx,%r10,4), %xmm0 movss %xmm0, 4(%rdi,%r10,4) movss 8(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 8(%rdx,%r10,4), %xmm0 movss %xmm0, 8(%rdi,%r10,4) movss 12(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 12(%rdx,%r10,4), %xmm0 movss %xmm0, 12(%rdi,%r10,4) addq $4, %r10 cmpq %rcx, %r10 jb .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge43 retq .Lfunc_end1: .size _Z7cpu_mulPfS_S_mm, .Lfunc_end1-_Z7cpu_mulPfS_S_mm .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $168, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movq $0, 16(%rsp) movq 8(%rsp), %rcx movq %rcx, %rax sarq $2, %rax cmpq $200009999, %rax # imm = 0xBEBE90F ja .LBB2_2 # %bb.1: movl $200010000, %esi # imm = 0xBEBE910 subq %rax, %rsi .Ltmp0: .cfi_escape 0x2e, 0x00 movq %rsp, %rdi callq _ZNSt6vectorIfSaIfEE17_M_default_appendEm .Ltmp1: jmp .LBB2_4 .LBB2_2: cmpq $800040000, %rcx # imm = 0x2FAFA440 je .LBB2_4 # %bb.3: movq $800040000, 8(%rsp) # imm = 0x2FAFA440 .LBB2_4: # %_ZNSt6vectorIfSaIfEE6resizeEm.exit movq (%rsp), %rbx .Ltmp3: .cfi_escape 0x2e, 0x00 leaq 32(%rsp), %rdi movl $200010000, %esi # imm = 0xBEBE910 callq hipMalloc .Ltmp4: # %bb.5: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit movq 32(%rsp), %r14 movq 8(%rsp), %rdx subq (%rsp), %rdx sarq $2, %rdx .Ltmp6: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movq %rbx, %rsi movl $1, %ecx callq hipMemcpy .Ltmp7: # %bb.6: .Ltmp9: .cfi_escape 0x2e, 0x00 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $42949672970000, %rdx # imm = 0x271000002710 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp10: # %bb.7: testl %eax, %eax jne 
.LBB2_10 # %bb.8: leaq 40000(%r14), %rax leaq 400040000(%r14), %rcx movq %rcx, 120(%rsp) movq %rax, 112(%rsp) movq %r14, 104(%rsp) movq $10000, 96(%rsp) # imm = 0x2710 movq $10000, 88(%rsp) # imm = 0x2710 leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rax movq %rax, 160(%rsp) .Ltmp11: .cfi_escape 0x2e, 0x00 leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp12: # %bb.9: # %.noexc46 movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d .Ltmp13: .cfi_escape 0x2e, 0x10 leaq 128(%rsp), %r9 movl $_Z7gpu_mulPfS_S_mm, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp14: .LBB2_10: movq 32(%rsp), %rsi movq 8(%rsp), %rdx subq (%rsp), %rdx sarq $2, %rdx .Ltmp15: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy .Ltmp16: # %bb.11: movq 32(%rsp), %rdi .Ltmp17: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp18: # %bb.12: movq (%rsp), %rdi testq %rdi, %rdi je .LBB2_14 # %bb.13: .cfi_escape 0x2e, 0x00 callq _ZdlPv .LBB2_14: # %_ZNSt6vectorIfSaIfEED2Ev.exit xorl %eax, %eax addq $168, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB2_21: .cfi_def_cfa_offset 192 .Ltmp2: jmp .LBB2_18 .LBB2_15: .Ltmp8: jmp .LBB2_18 .LBB2_17: .Ltmp5: jmp .LBB2_18 .LBB2_16: .Ltmp19: .LBB2_18: movq %rax, %rbx movq (%rsp), %rdi testq %rdi, %rdi je .LBB2_20 # %bb.19: .cfi_escape 0x2e, 0x00 callq _ZdlPv .LBB2_20: # %_ZNSt6vectorIfSaIfEED2Ev.exit49 .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType 
Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 .byte 0 # On action: cleanup .uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4 .uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5 .byte 0 # On action: cleanup .uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp7-.Ltmp6 # Call between .Ltmp6 and .Ltmp7 .uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8 .byte 0 # On action: cleanup .uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp18-.Ltmp9 # Call between .Ltmp9 and .Ltmp18 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Lfunc_end2-.Ltmp18 # Call between .Ltmp18 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .section .text._ZNSt6vectorIfSaIfEE17_M_default_appendEm,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_default_appendEm,comdat .weak _ZNSt6vectorIfSaIfEE17_M_default_appendEm # -- Begin function _ZNSt6vectorIfSaIfEE17_M_default_appendEm .p2align 4, 0x90 .type _ZNSt6vectorIfSaIfEE17_M_default_appendEm,@function _ZNSt6vectorIfSaIfEE17_M_default_appendEm: # @_ZNSt6vectorIfSaIfEE17_M_default_appendEm .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 testq %rsi, %rsi je .LBB3_16 # %bb.1: movq %rsi, %r14 movq %rdi, %rbx movq 8(%rdi), %r12 movq 16(%rdi), %rax subq %r12, %rax sarq $2, %rax cmpq 
%rsi, %rax jae .LBB3_2 # %bb.5: movabsq $2305843009213693951, %rax # imm = 0x1FFFFFFFFFFFFFFF movq (%rbx), %rcx movq %rcx, 16(%rsp) # 8-byte Spill subq %rcx, %r12 movq %r12, %r15 sarq $2, %r15 movq %r15, %rcx xorq %rax, %rcx cmpq %r14, %rcx jb .LBB3_17 # %bb.6: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit cmpq %r14, %r15 movq %r14, %rcx cmovaq %r15, %rcx leaq (%rcx,%r15), %rbp cmpq %rax, %rbp cmovaeq %rax, %rbp addq %r15, %rcx cmovbq %rax, %rbp testq %rbp, %rbp je .LBB3_7 # %bb.8: leaq (,%rbp,4), %rdi callq _Znwm movq %rax, %r13 jmp .LBB3_9 .LBB3_2: movl $0, (%r12) leaq 4(%r12), %rdi cmpq $1, %r14 je .LBB3_4 # %bb.3: # %_ZSt6fill_nIPfmfET_S1_T0_RKT1_.exit.loopexit.i.i.i leaq -4(,%r14,4), %rdx xorl %esi, %esi callq memset@PLT leaq (%r12,%r14,4), %rdi .LBB3_4: # %_ZSt27__uninitialized_default_n_aIPfmfET_S1_T0_RSaIT1_E.exit movq %rdi, 8(%rbx) jmp .LBB3_16 .LBB3_7: xorl %r13d, %r13d .LBB3_9: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit leaq (,%r15,4), %rax addq %r13, %rax movq %rax, 8(%rsp) # 8-byte Spill movl $0, (%r13,%r15,4) cmpq $1, %r14 je .LBB3_11 # %bb.10: # %_ZSt6fill_nIPfmfET_S1_T0_RKT1_.exit.loopexit.i.i.i30 movq 8(%rsp), %rax # 8-byte Reload leaq 4(%rax), %rdi leaq -4(,%r14,4), %rdx xorl %esi, %esi callq memset@PLT .LBB3_11: # %_ZSt27__uninitialized_default_n_aIPfmfET_S1_T0_RSaIT1_E.exit32 testq %r12, %r12 movq 16(%rsp), %r15 # 8-byte Reload jle .LBB3_13 # %bb.12: movq %r13, %rdi movq %r15, %rsi movq %r12, %rdx callq memmove@PLT .LBB3_13: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit testq %r15, %r15 je .LBB3_15 # %bb.14: movq %r15, %rdi callq _ZdlPv .LBB3_15: # %_ZNSt12_Vector_baseIfSaIfEE13_M_deallocateEPfm.exit35 movq %r13, (%rbx) movq 8(%rsp), %rax # 8-byte Reload leaq (%rax,%r14,4), %rax movq %rax, 8(%rbx) leaq (,%rbp,4), %rax addq %r13, %rax movq %rax, 16(%rbx) .LBB3_16: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 
.cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_17: .cfi_def_cfa_offset 80 movl $.L.str, %edi callq _ZSt20__throw_length_errorPKc .Lfunc_end3: .size _ZNSt6vectorIfSaIfEE17_M_default_appendEm, .Lfunc_end3-_ZNSt6vectorIfSaIfEE17_M_default_appendEm .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7gpu_mulPfS_S_mm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z7gpu_mulPfS_S_mm,@object # @_Z7gpu_mulPfS_S_mm .section .rodata,"a",@progbits .globl _Z7gpu_mulPfS_S_mm .p2align 3, 0x0 _Z7gpu_mulPfS_S_mm: .quad _Z22__device_stub__gpu_mulPfS_S_mm .size _Z7gpu_mulPfS_S_mm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "vector::_M_default_append" .size .L.str, 26 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz 
"_Z7gpu_mulPfS_S_mm" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__gpu_mulPfS_S_mm .addrsig_sym __gxx_personality_v0 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _Z7gpu_mulPfS_S_mm .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z7gpu_mulPfS_S_mm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff077624 */ /* 0x000fe400078e00ff */ /*0030*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */ /* 0x000fe200078e00ff */ /*0040*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */ /* 0x000e620000002600 */ /*0050*/ IMAD R0, R7, c[0x0][0x17c], RZ ; /* 0x00005f0007007a24 */ /* 0x000fc800078e02ff */ /*0060*/ IMAD R9, R7.reuse, c[0x0][0x17c], R0 ; /* 0x00005f0007097a24 */ /* 0x040fe400078e0200 */ /*0070*/ IMAD.WIDE.U32 R6, R7, c[0x0][0x178], RZ ; /* 0x00005e0007067a25 */ /* 0x000fc800078e00ff */ /*0080*/ IMAD.IADD R0, R7, 0x1, R9 ; /* 0x0000000107007824 */ /* 0x000fe200078e0209 */ /*0090*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */ /* 0x001fe20003f26070 */ /*00a0*/ IMAD.WIDE.U32 R2, R11.reuse, c[0x0][0x178], R4 ; /* 0x00005e000b027a25 */ /* 0x042fe200078e0004 */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x178], PT ; /* 0x00005e000b007a0c */ /* 0x000fc60003f06070 */ /*00c0*/ IMAD R13, R11, c[0x0][0x17c], R3 ; /* 0x00005f000b0d7a24 */ /* 0x000fe200078e0203 */ /*00d0*/ ISETP.GE.U32.AND P2, PT, R2, R6, PT ; /* 0x000000060200720c */ /* 0x000fc80003f46070 */ /*00e0*/ ISETP.GE.U32.AND.EX P2, PT, R13, R0, PT, P2 ; /* 0x000000000d00720c */ /* 0x000fc80003f46120 */ /*00f0*/ ISETP.GE.U32.OR.EX P1, PT, RZ, c[0x0][0x17c], P2, P1 ; /* 0x00005f00ff007a0c */ /* 0x000fc80001726510 */ /*0100*/ ISETP.GE.U32.OR.EX P0, PT, RZ, c[0x0][0x17c], P1, P0 ; /* 0x00005f00ff007a0c */ /* 0x000fda0000f06500 */ /*0110*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0120*/ LEA R8, P0, R4.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a0004087a11 */ /* 0x040fe200078010ff 
*/ /*0130*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0140*/ LEA R6, P1, R11.reuse, c[0x0][0x170], 0x2 ; /* 0x00005c000b067a11 */ /* 0x040fe400078210ff */ /*0150*/ LEA.HI.X R9, R4, c[0x0][0x16c], RZ, 0x2, P0 ; /* 0x00005b0004097a11 */ /* 0x000fe400000f14ff */ /*0160*/ LEA.HI.X R7, R11, c[0x0][0x174], RZ, 0x2, P1 ; /* 0x00005d000b077a11 */ /* 0x000fc800008f14ff */ /*0170*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */ /* 0x000ea8000c1e1900 */ /*0180*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1900 */ /*0190*/ LEA R4, P0, R2, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */ /* 0x000fc800078010ff */ /*01a0*/ LEA.HI.X R5, R2, c[0x0][0x164], R13, 0x2, P0 ; /* 0x0000590002057a11 */ /* 0x000fe200000f140d */ /*01b0*/ FMUL R3, R6, R9 ; /* 0x0000000906037220 */ /* 0x004fca0000400000 */ /*01c0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x000fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7gpu_mulPfS_S_mm .globl _Z7gpu_mulPfS_S_mm .p2align 8 .type _Z7gpu_mulPfS_S_mm,@function _Z7gpu_mulPfS_S_mm: s_load_b64 s[6:7], s[0:1], 0x18 s_mov_b32 s2, s15 s_mov_b32 s15, 0 s_waitcnt lgkmcnt(0) s_mul_i32 s3, s2, s7 s_mul_hi_u32 s5, s2, s6 s_mul_i32 s4, s2, s6 s_mul_i32 s11, s6, s7 s_mul_hi_u32 s10, s6, s6 s_add_i32 s5, s5, s3 s_add_u32 s8, s4, s14 s_addc_u32 s9, s5, 0 s_add_i32 s3, s10, s11 s_mul_i32 s10, s6, s6 s_add_i32 s11, s3, s11 s_mov_b32 s3, s15 v_cmp_ge_u64_e64 s8, s[8:9], s[10:11] v_cmp_ge_u64_e64 s9, s[14:15], s[6:7] v_cmp_ge_u64_e64 s6, s[2:3], s[6:7] s_delay_alu instid0(VALU_DEP_2) s_or_b32 s7, s9, s8 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) s_or_b32 s6, s6, s7 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 vcc_lo, exec_lo, s6 s_cbranch_vccnz .LBB0_2 s_clause 0x1 s_load_b128 s[8:11], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_lshl_b64 s[6:7], s[14:15], 2 v_mov_b32_e32 v1, 0 s_waitcnt lgkmcnt(0) s_add_u32 s10, s10, s6 s_addc_u32 s11, s11, s7 s_lshl_b64 s[2:3], s[2:3], 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 s_load_b32 s2, s[10:11], 0x0 s_load_b32 s3, s[0:1], 0x0 s_lshl_b64 s[0:1], s[4:5], 2 s_add_u32 s0, s8, s0 s_addc_u32 s1, s9, s1 s_add_u32 s0, s0, s6 s_addc_u32 s1, s1, s7 s_waitcnt lgkmcnt(0) v_mul_f32_e64 v0, s2, s3 global_store_b32 v1, v0, s[0:1] .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7gpu_mulPfS_S_mm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 40 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7gpu_mulPfS_S_mm, .Lfunc_end0-_Z7gpu_mulPfS_S_mm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 40 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7gpu_mulPfS_S_mm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7gpu_mulPfS_S_mm.kd .uniform_work_group_size: 1 
.uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000b43d9_00000000-6_gmain.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2911: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2911: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7cpu_mulPfS_S_mm .type _Z7cpu_mulPfS_S_mm, @function _Z7cpu_mulPfS_S_mm: .LFB2896: .cfi_startproc endbr64 testq %r8, %r8 je .L3 leaq 0(,%rcx,4), %r10 movl $0, %r9d jmp .L5 .L6: movss (%rsi,%rax,4), %xmm0 mulss (%rdx,%rax,4), %xmm0 movss %xmm0, (%rdi,%rax,4) movss 4(%rsi,%rax,4), %xmm0 mulss 4(%rdx,%rax,4), %xmm0 movss %xmm0, 4(%rdi,%rax,4) movss 8(%rsi,%rax,4), %xmm0 mulss 8(%rdx,%rax,4), %xmm0 movss %xmm0, 8(%rdi,%rax,4) movss 12(%rsi,%rax,4), %xmm0 mulss 12(%rdx,%rax,4), %xmm0 movss %xmm0, 12(%rdi,%rax,4) addq $4, %rax cmpq %rcx, %rax jb .L6 .L7: addq %r10, %rsi addq %r10, %rdi addq $1, %r9 cmpq %r9, %r8 je .L3 .L5: movl $0, %eax testq %rcx, %rcx jne .L6 jmp .L7 .L3: ret .cfi_endproc .LFE2896: .size _Z7cpu_mulPfS_S_mm, .-_Z7cpu_mulPfS_S_mm .globl _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .type _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm, @function _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm: .LFB2933: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call 
__cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 152(%rsp), %rax subq %fs:40, %rax jne .L16 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7gpu_mulPfS_S_mm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2933: .size _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm, .-_Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .globl _Z7gpu_mulPfS_S_mm .type _Z7gpu_mulPfS_S_mm, @function _Z7gpu_mulPfS_S_mm: .LFB2934: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2934: .size _Z7gpu_mulPfS_S_mm, .-_Z7gpu_mulPfS_S_mm .globl main .type main, @function main: .LFB2897: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA2897 endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $800040000, %edi .LEHB0: call _Znwm@PLT .LEHE0: movq %rax, %rbx movl $0x00000000, (%rax) movl $0, %eax .L20: addq $4, %rax cmpq $800040000, %rax je .L34 movl $0x00000000, (%rbx,%rax) jmp .L20 .L34: movl $0, %eax .L22: addq $1, %rax cmpq $200009999, %rax jbe .L22 leaq 8(%rsp), %rdi movl $200010000, %esi .LEHB1: call cudaMalloc@PLT movq 8(%rsp), %rbp movl $1, %ecx movl $200010000, %edx movq %rbx, %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl $10000, 16(%rsp) movl $10000, 20(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L23 leaq 40000(%rbp), 
%rsi leaq 400040000(%rbp), %rdi movl $10000, %r8d movl $10000, %ecx movq %rbp, %rdx call _Z32__device_stub__Z7gpu_mulPfS_S_mmPfS_S_mm .L23: movl $2, %ecx movl $200010000, %edx movq 8(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movq 8(%rsp), %rdi call cudaFree@PLT .LEHE1: movl $800040000, %esi movq %rbx, %rdi call _ZdlPvm@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L30: .cfi_restore_state endbr64 movq %rax, %rbp .L27: movq 40(%rsp), %rax subq %fs:40, %rax je .L25 call __stack_chk_fail@PLT .L25: movq %rbp, %rdi .LEHB2: call _Unwind_Resume@PLT .LEHE2: .L29: endbr64 movq %rax, %rbp movl $800040000, %esi movq %rbx, %rdi call _ZdlPvm@PLT jmp .L27 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2897: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA2897: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE2897-.LLSDACSB2897 .LLSDACSB2897: .uleb128 .LEHB0-.LFB2897 .uleb128 .LEHE0-.LEHB0 .uleb128 .L30-.LFB2897 .uleb128 0 .uleb128 .LEHB1-.LFB2897 .uleb128 .LEHE1-.LEHB1 .uleb128 .L29-.LFB2897 .uleb128 0 .uleb128 .LEHB2-.LFB2897 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .LLSDACSE2897: .text .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "_Z7gpu_mulPfS_S_mm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2936: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z7gpu_mulPfS_S_mm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), 
%rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2936: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "gmain.hip" .globl _Z22__device_stub__gpu_mulPfS_S_mm # -- Begin function _Z22__device_stub__gpu_mulPfS_S_mm .p2align 4, 0x90 .type _Z22__device_stub__gpu_mulPfS_S_mm,@function _Z22__device_stub__gpu_mulPfS_S_mm: # @_Z22__device_stub__gpu_mulPfS_S_mm .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7gpu_mulPfS_S_mm, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z22__device_stub__gpu_mulPfS_S_mm, .Lfunc_end0-_Z22__device_stub__gpu_mulPfS_S_mm .cfi_endproc # -- End function .globl _Z7cpu_mulPfS_S_mm # -- Begin function _Z7cpu_mulPfS_S_mm .p2align 4, 0x90 .type _Z7cpu_mulPfS_S_mm,@function _Z7cpu_mulPfS_S_mm: # @_Z7cpu_mulPfS_S_mm .cfi_startproc # %bb.0: testq %r8, %r8 je .LBB1_6 # %bb.1: # %.preheader.lr.ph leaq (,%rcx,4), %rax xorl %r9d, %r9d jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %r9 addq %rax, %rdi addq %rax, %rsi cmpq %r8, %r9 je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 testq %rcx, %rcx je .LBB1_5 # %bb.3: # %.lr.ph.preheader # in Loop: Header=BB1_2 Depth=1 xorl %r10d, %r10d .p2align 4, 0x90 .LBB1_4: # %.lr.ph # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss (%rdx,%r10,4), %xmm0 movss %xmm0, (%rdi,%r10,4) movss 
4(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 4(%rdx,%r10,4), %xmm0 movss %xmm0, 4(%rdi,%r10,4) movss 8(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 8(%rdx,%r10,4), %xmm0 movss %xmm0, 8(%rdi,%r10,4) movss 12(%rsi,%r10,4), %xmm0 # xmm0 = mem[0],zero,zero,zero mulss 12(%rdx,%r10,4), %xmm0 movss %xmm0, 12(%rdi,%r10,4) addq $4, %r10 cmpq %rcx, %r10 jb .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge43 retq .Lfunc_end1: .size _Z7cpu_mulPfS_S_mm, .Lfunc_end1-_Z7cpu_mulPfS_S_mm .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $168, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movq $0, 16(%rsp) movq 8(%rsp), %rcx movq %rcx, %rax sarq $2, %rax cmpq $200009999, %rax # imm = 0xBEBE90F ja .LBB2_2 # %bb.1: movl $200010000, %esi # imm = 0xBEBE910 subq %rax, %rsi .Ltmp0: .cfi_escape 0x2e, 0x00 movq %rsp, %rdi callq _ZNSt6vectorIfSaIfEE17_M_default_appendEm .Ltmp1: jmp .LBB2_4 .LBB2_2: cmpq $800040000, %rcx # imm = 0x2FAFA440 je .LBB2_4 # %bb.3: movq $800040000, 8(%rsp) # imm = 0x2FAFA440 .LBB2_4: # %_ZNSt6vectorIfSaIfEE6resizeEm.exit movq (%rsp), %rbx .Ltmp3: .cfi_escape 0x2e, 0x00 leaq 32(%rsp), %rdi movl $200010000, %esi # imm = 0xBEBE910 callq hipMalloc .Ltmp4: # %bb.5: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit movq 32(%rsp), %r14 movq 8(%rsp), %rdx subq (%rsp), %rdx sarq $2, %rdx .Ltmp6: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movq %rbx, %rsi movl $1, %ecx callq hipMemcpy .Ltmp7: # %bb.6: .Ltmp9: .cfi_escape 0x2e, 0x00 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $42949672970000, %rdx # imm = 0x271000002710 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp10: # %bb.7: testl %eax, %eax jne 
.LBB2_10 # %bb.8: leaq 40000(%r14), %rax leaq 400040000(%r14), %rcx movq %rcx, 120(%rsp) movq %rax, 112(%rsp) movq %r14, 104(%rsp) movq $10000, 96(%rsp) # imm = 0x2710 movq $10000, 88(%rsp) # imm = 0x2710 leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rax movq %rax, 160(%rsp) .Ltmp11: .cfi_escape 0x2e, 0x00 leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration .Ltmp12: # %bb.9: # %.noexc46 movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d .Ltmp13: .cfi_escape 0x2e, 0x10 leaq 128(%rsp), %r9 movl $_Z7gpu_mulPfS_S_mm, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp14: .LBB2_10: movq 32(%rsp), %rsi movq 8(%rsp), %rdx subq (%rsp), %rdx sarq $2, %rdx .Ltmp15: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy .Ltmp16: # %bb.11: movq 32(%rsp), %rdi .Ltmp17: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp18: # %bb.12: movq (%rsp), %rdi testq %rdi, %rdi je .LBB2_14 # %bb.13: .cfi_escape 0x2e, 0x00 callq _ZdlPv .LBB2_14: # %_ZNSt6vectorIfSaIfEED2Ev.exit xorl %eax, %eax addq $168, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB2_21: .cfi_def_cfa_offset 192 .Ltmp2: jmp .LBB2_18 .LBB2_15: .Ltmp8: jmp .LBB2_18 .LBB2_17: .Ltmp5: jmp .LBB2_18 .LBB2_16: .Ltmp19: .LBB2_18: movq %rax, %rbx movq (%rsp), %rdi testq %rdi, %rdi je .LBB2_20 # %bb.19: .cfi_escape 0x2e, 0x00 callq _ZdlPv .LBB2_20: # %_ZNSt6vectorIfSaIfEED2Ev.exit49 .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType 
Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 .byte 0 # On action: cleanup .uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4 .uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5 .byte 0 # On action: cleanup .uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp7-.Ltmp6 # Call between .Ltmp6 and .Ltmp7 .uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8 .byte 0 # On action: cleanup .uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp18-.Ltmp9 # Call between .Ltmp9 and .Ltmp18 .uleb128 .Ltmp19-.Lfunc_begin0 # jumps to .Ltmp19 .byte 0 # On action: cleanup .uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Lfunc_end2-.Ltmp18 # Call between .Ltmp18 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .section .text._ZNSt6vectorIfSaIfEE17_M_default_appendEm,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_default_appendEm,comdat .weak _ZNSt6vectorIfSaIfEE17_M_default_appendEm # -- Begin function _ZNSt6vectorIfSaIfEE17_M_default_appendEm .p2align 4, 0x90 .type _ZNSt6vectorIfSaIfEE17_M_default_appendEm,@function _ZNSt6vectorIfSaIfEE17_M_default_appendEm: # @_ZNSt6vectorIfSaIfEE17_M_default_appendEm .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 testq %rsi, %rsi je .LBB3_16 # %bb.1: movq %rsi, %r14 movq %rdi, %rbx movq 8(%rdi), %r12 movq 16(%rdi), %rax subq %r12, %rax sarq $2, %rax cmpq 
%rsi, %rax jae .LBB3_2 # %bb.5: movabsq $2305843009213693951, %rax # imm = 0x1FFFFFFFFFFFFFFF movq (%rbx), %rcx movq %rcx, 16(%rsp) # 8-byte Spill subq %rcx, %r12 movq %r12, %r15 sarq $2, %r15 movq %r15, %rcx xorq %rax, %rcx cmpq %r14, %rcx jb .LBB3_17 # %bb.6: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit cmpq %r14, %r15 movq %r14, %rcx cmovaq %r15, %rcx leaq (%rcx,%r15), %rbp cmpq %rax, %rbp cmovaeq %rax, %rbp addq %r15, %rcx cmovbq %rax, %rbp testq %rbp, %rbp je .LBB3_7 # %bb.8: leaq (,%rbp,4), %rdi callq _Znwm movq %rax, %r13 jmp .LBB3_9 .LBB3_2: movl $0, (%r12) leaq 4(%r12), %rdi cmpq $1, %r14 je .LBB3_4 # %bb.3: # %_ZSt6fill_nIPfmfET_S1_T0_RKT1_.exit.loopexit.i.i.i leaq -4(,%r14,4), %rdx xorl %esi, %esi callq memset@PLT leaq (%r12,%r14,4), %rdi .LBB3_4: # %_ZSt27__uninitialized_default_n_aIPfmfET_S1_T0_RSaIT1_E.exit movq %rdi, 8(%rbx) jmp .LBB3_16 .LBB3_7: xorl %r13d, %r13d .LBB3_9: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit leaq (,%r15,4), %rax addq %r13, %rax movq %rax, 8(%rsp) # 8-byte Spill movl $0, (%r13,%r15,4) cmpq $1, %r14 je .LBB3_11 # %bb.10: # %_ZSt6fill_nIPfmfET_S1_T0_RKT1_.exit.loopexit.i.i.i30 movq 8(%rsp), %rax # 8-byte Reload leaq 4(%rax), %rdi leaq -4(,%r14,4), %rdx xorl %esi, %esi callq memset@PLT .LBB3_11: # %_ZSt27__uninitialized_default_n_aIPfmfET_S1_T0_RSaIT1_E.exit32 testq %r12, %r12 movq 16(%rsp), %r15 # 8-byte Reload jle .LBB3_13 # %bb.12: movq %r13, %rdi movq %r15, %rsi movq %r12, %rdx callq memmove@PLT .LBB3_13: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit testq %r15, %r15 je .LBB3_15 # %bb.14: movq %r15, %rdi callq _ZdlPv .LBB3_15: # %_ZNSt12_Vector_baseIfSaIfEE13_M_deallocateEPfm.exit35 movq %r13, (%rbx) movq 8(%rsp), %rax # 8-byte Reload leaq (%rax,%r14,4), %rax movq %rax, 8(%rbx) leaq (,%rbp,4), %rax addq %r13, %rax movq %rax, 16(%rbx) .LBB3_16: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 
.cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_17: .cfi_def_cfa_offset 80 movl $.L.str, %edi callq _ZSt20__throw_length_errorPKc .Lfunc_end3: .size _ZNSt6vectorIfSaIfEE17_M_default_appendEm, .Lfunc_end3-_ZNSt6vectorIfSaIfEE17_M_default_appendEm .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7gpu_mulPfS_S_mm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z7gpu_mulPfS_S_mm,@object # @_Z7gpu_mulPfS_S_mm .section .rodata,"a",@progbits .globl _Z7gpu_mulPfS_S_mm .p2align 3, 0x0 _Z7gpu_mulPfS_S_mm: .quad _Z22__device_stub__gpu_mulPfS_S_mm .size _Z7gpu_mulPfS_S_mm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "vector::_M_default_append" .size .L.str, 26 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz 
"_Z7gpu_mulPfS_S_mm" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__gpu_mulPfS_S_mm .addrsig_sym __gxx_personality_v0 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _Z7gpu_mulPfS_S_mm .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; cudaMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory cudaFree(pi); return 0; }
code for sm_80 Function : _Z4cuPiPfif .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; cudaMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory cudaFree(pi); return 0; }
.file "tmpxft_00134f31_00000000-6_ex3.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z25__device_stub__Z4cuPiPfifPfif .type _Z25__device_stub__Z4cuPiPfifPfif, @function _Z25__device_stub__Z4cuPiPfifPfif: .LFB3694: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movss %xmm0, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z4cuPiPfif(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z25__device_stub__Z4cuPiPfifPfif, .-_Z25__device_stub__Z4cuPiPfifPfif .globl _Z4cuPiPfif .type _Z4cuPiPfif, @function _Z4cuPiPfif: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z4cuPiPfifPfif addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z4cuPiPfif, .-_Z4cuPiPfif .section 
.rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Pi calculated as %f\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "Accuracy of Pi calculation %f\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $1, %edx movl $4, %esi call cudaMallocManaged@PLT movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT movq 8(%rsp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rax movss .LC2(%rip), %xmm0 subss (%rax), %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movss .LC0(%rip), %xmm0 movl $10, %esi movq 8(%rsp), %rdi call _Z25__device_stub__Z4cuPiPfifPfif jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z4cuPiPfif" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z4cuPiPfif(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 
16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1084227584 .align 4 .LC2: .long 1077936128 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; cudaMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory cudaFree(pi); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; hipMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory hipFree(pi); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; hipMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory hipFree(pi); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4cuPiPfif .globl _Z4cuPiPfif .p2align 8 .type _Z4cuPiPfif,@function _Z4cuPiPfif: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4cuPiPfif .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 16 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4cuPiPfif, .Lfunc_end0-_Z4cuPiPfif .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - 
.address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 16 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4cuPiPfif .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z4cuPiPfif.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <math.h> __global__ void cuPi(float *sum, int nbin, float step) { // Write your pi calculation kernel here return; } int main(void) { int REAL_PI = 3.141592653589793238462643383; int NBINS = 10; // modify this to achieve better performance int STEP = 5; // modify this to achieve better performance float* pi; hipMallocManaged(&pi, sizeof(float)); // Calculate Pi cuPi<<<1, 1>>>(pi, NBINS, STEP); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); printf("Pi calculated as %f\n", *pi); float acc = REAL_PI - *pi; printf("Accuracy of Pi calculation %f\n", acc); // Free memory hipFree(pi); return 0; }
.text .file "ex3.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z19__device_stub__cuPiPfif # -- Begin function _Z19__device_stub__cuPiPfif .p2align 4, 0x90 .type _Z19__device_stub__cuPiPfif,@function _Z19__device_stub__cuPiPfif: # @_Z19__device_stub__cuPiPfif .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movss %xmm0, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z4cuPiPfif, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z19__device_stub__cuPiPfif, .Lfunc_end0-_Z19__device_stub__cuPiPfif .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x40400000 # float 3 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 leaq 8(%rsp), %rdi movl $4, %esi movl $1, %edx callq hipMallocManaged movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $10, 20(%rsp) movl $1084227584, 16(%rsp) # imm = 0x40A00000 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), 
%r8d leaq 80(%rsp), %r9 movl $_Z4cuPiPfif, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize movq 8(%rsp), %rax movss (%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 8(%rsp), %rax movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero subss (%rax), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %edi movb $1, %al callq printf movq 8(%rsp), %rdi callq hipFree xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4cuPiPfif, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4cuPiPfif,@object # @_Z4cuPiPfif 
.section .rodata,"a",@progbits .globl _Z4cuPiPfif .p2align 3, 0x0 _Z4cuPiPfif: .quad _Z19__device_stub__cuPiPfif .size _Z4cuPiPfif, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Pi calculated as %f\n" .size .L.str, 21 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Accuracy of Pi calculation %f\n" .size .L.str.1, 31 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4cuPiPfif" .size .L__unnamed_1, 12 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__cuPiPfif .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4cuPiPfif .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z4cuPiPfif .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4cuPiPfif .globl _Z4cuPiPfif .p2align 8 .type _Z4cuPiPfif,@function _Z4cuPiPfif: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4cuPiPfif .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 16 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4cuPiPfif, .Lfunc_end0-_Z4cuPiPfif .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - 
.address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 16 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4cuPiPfif .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z4cuPiPfif.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00134f31_00000000-6_ex3.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z25__device_stub__Z4cuPiPfifPfif .type _Z25__device_stub__Z4cuPiPfifPfif, @function _Z25__device_stub__Z4cuPiPfifPfif: .LFB3694: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movss %xmm0, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z4cuPiPfif(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z25__device_stub__Z4cuPiPfifPfif, .-_Z25__device_stub__Z4cuPiPfifPfif .globl _Z4cuPiPfif .type _Z4cuPiPfif, @function _Z4cuPiPfif: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z4cuPiPfifPfif addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z4cuPiPfif, .-_Z4cuPiPfif .section 
.rodata.str1.1,"aMS",@progbits,1 .LC1: .string "Pi calculated as %f\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "Accuracy of Pi calculation %f\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $1, %edx movl $4, %esi call cudaMallocManaged@PLT movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT movq 8(%rsp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rax movss .LC2(%rip), %xmm0 subss (%rax), %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movss .LC0(%rip), %xmm0 movl $10, %esi movq 8(%rsp), %rdi call _Z25__device_stub__Z4cuPiPfifPfif jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z4cuPiPfif" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z4cuPiPfif(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 
16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1084227584 .align 4 .LC2: .long 1077936128 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ex3.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z19__device_stub__cuPiPfif # -- Begin function _Z19__device_stub__cuPiPfif .p2align 4, 0x90 .type _Z19__device_stub__cuPiPfif,@function _Z19__device_stub__cuPiPfif: # @_Z19__device_stub__cuPiPfif .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movss %xmm0, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z4cuPiPfif, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z19__device_stub__cuPiPfif, .Lfunc_end0-_Z19__device_stub__cuPiPfif .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x40400000 # float 3 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 leaq 8(%rsp), %rdi movl $4, %esi movl $1, %edx callq hipMallocManaged movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $10, 20(%rsp) movl $1084227584, 16(%rsp) # imm = 0x40A00000 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), 
%r8d leaq 80(%rsp), %r9 movl $_Z4cuPiPfif, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize movq 8(%rsp), %rax movss (%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 8(%rsp), %rax movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero subss (%rax), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %edi movb $1, %al callq printf movq 8(%rsp), %rdi callq hipFree xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4cuPiPfif, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4cuPiPfif,@object # @_Z4cuPiPfif 
.section .rodata,"a",@progbits .globl _Z4cuPiPfif .p2align 3, 0x0 _Z4cuPiPfif: .quad _Z19__device_stub__cuPiPfif .size _Z4cuPiPfif, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Pi calculated as %f\n" .size .L.str, 21 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Accuracy of Pi calculation %f\n" .size .L.str.1, 31 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4cuPiPfif" .size .L__unnamed_1, 12 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__cuPiPfif .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4cuPiPfif .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
code for sm_80 Function : _Z18cast_u8x4_to_f32x4jPKhPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0050*/ IADD3 R0, R2, 0x4, RZ ; /* 0x0000000402007810 */ /* 0x000fc80007ffe0ff */ /*0060*/ ISETP.GT.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */ /* 0x000fda0003f04070 */ /*0070*/ @P0 BRA 0x140 ; /* 0x000000c000000947 */ /* 0x000fea0003800000 */ /*0080*/ SHF.R.U32.HI R2, RZ, 0x2, R2 ; /* 0x00000002ff027819 */ /* 0x000fc80000011602 */ /*0090*/ LEA R4, P0, R2, c[0x0][0x168], 0x2 ; /* 0x00005a0002047a11 */ /* 0x000fca00078010ff */ /*00a0*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff057624 */ /* 0x000fca00000e06ff */ /*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */ /* 0x000fd400000001ff */ /*00d0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*00e0*/ I2F.U8 R11, R4.B3 ; /* 0x30000004000b7306 */ /* 0x004ff00000001000 */ /*00f0*/ I2F.U8 R10, R4.B2 ; /* 0x20000004000a7306 */ /* 0x000ff00000001000 */ /*0100*/ I2F.U8 R9, R4.B1 ; /* 0x1000000400097306 */ /* 0x000ff00000001000 */ /*0110*/ I2F.U8 R8, R4 ; /* 0x0000000400087306 */ /* 0x000e240000001000 */ /*0120*/ STG.E.128 [R2.64], R8 ; /* 0x0000000802007986 */ /* 0x001fe2000c101d04 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 
*/ /*0150*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0160*/ IADD3 R4, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x000fca0007f1e0ff */ /*0170*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff057624 */ /* 0x000fca00000e06ff */ /*0180*/ LDG.E.U8 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1100 */ /*0190*/ MOV R3, 0x4 ; /* 0x0000000400037802 */ /* 0x000fca0000000f00 */ /*01a0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*01b0*/ I2F.U16 R7, R4 ; /* 0x0000000400077306 */ /* 0x004e280000101000 */ /*01c0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z14cast_u8_to_f32jPKhPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e280000002100 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0030*/ IMAD R2, R3, c[0x0][0x0], R2 ; /* 0x0000000003027a24 */ /* 0x001fca00078e0202 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IADD3 R4, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x000fe20007f1e0ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IADD3.X R5, RZ, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00ff057a10 */ /* 0x000fca00007fe4ff */ /*0090*/ LDG.E.U8 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1100 */ /*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fd400000001ff */ /*00b0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*00c0*/ I2F.U16 R7, R4 ; /* 0x0000000400077306 */ /* 0x004e280000101000 */ /*00d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
.file "tmpxft_0012f351_00000000-6_transform.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf .type _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf, @function _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf: .LFB2053: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14cast_u8_to_f32jPKhPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf, .-_Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf .globl _Z14cast_u8_to_f32jPKhPf .type _Z14cast_u8_to_f32jPKhPf, @function _Z14cast_u8_to_f32jPKhPf: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf addq $8, 
%rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _Z14cast_u8_to_f32jPKhPf, .-_Z14cast_u8_to_f32jPKhPf .globl arraydiff_cuda_kernel_cast_u8_to_f32 .type arraydiff_cuda_kernel_cast_u8_to_f32, @function arraydiff_cuda_kernel_cast_u8_to_f32: .LFB2027: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $32, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r12 movq %rcx, %r9 movl $1024, 20(%rsp) movl $1, 24(%rsp) leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movq %r12, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf jmp .L11 .cfi_endproc .LFE2027: .size arraydiff_cuda_kernel_cast_u8_to_f32, .-arraydiff_cuda_kernel_cast_u8_to_f32 .globl _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf .type _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf, @function _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf: .LFB2055: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L19 .L15: movq 120(%rsp), %rax subq %fs:40, %rax jne 
.L20 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18cast_u8x4_to_f32x4jPKhPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L15 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf, .-_Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .type _Z18cast_u8x4_to_f32x4jPKhPf, @function _Z18cast_u8x4_to_f32x4jPKhPf: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size _Z18cast_u8x4_to_f32x4jPKhPf, .-_Z18cast_u8x4_to_f32x4jPKhPf .globl arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .type arraydiff_cuda_kernel_cast_u8x4_to_f32x4, @function arraydiff_cuda_kernel_cast_u8x4_to_f32x4: .LFB2028: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $32, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r12 movq %rcx, %r9 movl $1024, 20(%rsp) movl $1, 24(%rsp) leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L23: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state movq %r12, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf jmp .L23 .cfi_endproc .LFE2028: .size 
arraydiff_cuda_kernel_cast_u8x4_to_f32x4, .-arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z18cast_u8x4_to_f32x4jPKhPf" .LC1: .string "_Z14cast_u8_to_f32jPKhPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18cast_u8x4_to_f32x4jPKhPf(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z14cast_u8_to_f32jPKhPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 
3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, cudaStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14cast_u8_to_f32jPKhPf .globl _Z14cast_u8_to_f32jPKhPf .p2align 8 .type _Z14cast_u8_to_f32jPKhPf,@function _Z14cast_u8_to_f32jPKhPf: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x8 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_load_u8 v3, v1, s[0:1] v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14cast_u8_to_f32jPKhPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 
.amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14cast_u8_to_f32jPKhPf, .Lfunc_end0-_Z14cast_u8_to_f32jPKhPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z18cast_u8x4_to_f32x4jPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .p2align 8 .type _Z18cast_u8x4_to_f32x4jPKhPf,@function _Z18cast_u8x4_to_f32x4jPKhPf: s_clause 0x2 s_load_b32 s5, s[0:1], 0x24 s_load_b32 s4, s[0:1], 0x0 s_load_b128 s[0:3], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s5, s5, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1] s_mov_b32 s5, exec_lo v_add_nc_u32_e32 v0, 4, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_ge_u32_e64 s4, v0 s_xor_b32 s5, exec_lo, s5 s_cbranch_execz .LBB1_2 v_and_b32_e32 v0, -4, v1 s_clause 0x3 global_load_u8 v2, v0, s[0:1] global_load_u8 v3, v0, s[0:1] offset:1 global_load_u8 v6, v0, s[0:1] offset:2 global_load_u8 v7, v0, s[0:1] offset:3 v_lshrrev_b32_e32 v0, 2, v1 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 4, v[0:1] v_add_co_u32 v4, vcc_lo, s2, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo s_waitcnt vmcnt(3) v_cvt_f32_ubyte0_e32 v0, v2 s_waitcnt vmcnt(2) v_cvt_f32_ubyte0_e32 v1, v3 s_waitcnt vmcnt(1) v_cvt_f32_ubyte0_e32 v2, v6 s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v3, v7 global_store_b128 v[4:5], v[0:3], off .LBB1_2: s_and_not1_saveexec_b32 s5, s5 s_cbranch_execz .LBB1_5 v_cmp_gt_u32_e32 vcc_lo, s4, v1 s_and_saveexec_b32 s4, vcc_lo s_cbranch_execz .LBB1_5 global_load_u8 v3, v1, s[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v2, v3 global_store_b32 v[0:1], v2, off .LBB1_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18cast_u8x4_to_f32x4jPKhPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z18cast_u8x4_to_f32x4jPKhPf, .Lfunc_end1-_Z18cast_u8x4_to_f32x4jPKhPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
__hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14cast_u8_to_f32jPKhPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14cast_u8_to_f32jPKhPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: 
hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18cast_u8x4_to_f32x4jPKhPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18cast_u8x4_to_f32x4jPKhPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* Copyright 2017 the arraydiff authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdint.h> #include <stdlib.h> __global__ void cast_u8_to_f32( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8_to_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); } __global__ void cast_u8x4_to_f32x4( uint32_t dim, const uint8_t *x, float *y) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx + 4 <= dim) { uint32_t i = idx >> 2; uchar4 vx_i = ((const uchar4 *)x)[i]; float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w }; ((float4 *)y)[i] = vy_i; } else if (idx < dim) { y[idx] = (float)(x[idx]); } } extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4( size_t dim, const uint8_t *x, float *y, hipStream_t stream) { cast_u8x4_to_f32x4<<<(dim+1024-1)/1024, 1024, 0, stream>>>( dim, x, y); }
.text .file "transform.hip" .globl _Z29__device_stub__cast_u8_to_f32jPKhPf # -- Begin function _Z29__device_stub__cast_u8_to_f32jPKhPf .p2align 4, 0x90 .type _Z29__device_stub__cast_u8_to_f32jPKhPf,@function _Z29__device_stub__cast_u8_to_f32jPKhPf: # @_Z29__device_stub__cast_u8_to_f32jPKhPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14cast_u8_to_f32jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z29__device_stub__cast_u8_to_f32jPKhPf, .Lfunc_end0-_Z29__device_stub__cast_u8_to_f32jPKhPf .cfi_endproc # -- End function .globl arraydiff_cuda_kernel_cast_u8_to_f32 # -- Begin function arraydiff_cuda_kernel_cast_u8_to_f32 .p2align 4, 0x90 .type arraydiff_cuda_kernel_cast_u8_to_f32,@function arraydiff_cuda_kernel_cast_u8_to_f32: # @arraydiff_cuda_kernel_cast_u8_to_f32 .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r9 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $1024, %rdx # imm = 0x400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movl %r15d, 12(%rsp) movq %r14, 72(%rsp) movq %rbx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), 
%rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14cast_u8_to_f32jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size arraydiff_cuda_kernel_cast_u8_to_f32, .Lfunc_end1-arraydiff_cuda_kernel_cast_u8_to_f32 .cfi_endproc # -- End function .globl _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf # -- Begin function _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .p2align 4, 0x90 .type _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf,@function _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf: # @_Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18cast_u8x4_to_f32x4jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf, .Lfunc_end2-_Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .cfi_endproc # -- End function .globl arraydiff_cuda_kernel_cast_u8x4_to_f32x4 # -- Begin function arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .p2align 4, 0x90 .type 
arraydiff_cuda_kernel_cast_u8x4_to_f32x4,@function arraydiff_cuda_kernel_cast_u8x4_to_f32x4: # @arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r9 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $1024, %rdx # imm = 0x400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: movl %r15d, 12(%rsp) movq %r14, 72(%rsp) movq %rbx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18cast_u8x4_to_f32x4jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size arraydiff_cuda_kernel_cast_u8x4_to_f32x4, .Lfunc_end3-arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq 
__hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14cast_u8_to_f32jPKhPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18cast_u8x4_to_f32x4jPKhPf, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14cast_u8_to_f32jPKhPf,@object # @_Z14cast_u8_to_f32jPKhPf .section .rodata,"a",@progbits .globl _Z14cast_u8_to_f32jPKhPf .p2align 3, 0x0 _Z14cast_u8_to_f32jPKhPf: .quad _Z29__device_stub__cast_u8_to_f32jPKhPf .size _Z14cast_u8_to_f32jPKhPf, 8 .type _Z18cast_u8x4_to_f32x4jPKhPf,@object # @_Z18cast_u8x4_to_f32x4jPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .p2align 3, 0x0 _Z18cast_u8x4_to_f32x4jPKhPf: .quad _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .size _Z18cast_u8x4_to_f32x4jPKhPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14cast_u8_to_f32jPKhPf" .size .L__unnamed_1, 25 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z18cast_u8x4_to_f32x4jPKhPf" .size .L__unnamed_2, 29 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__cast_u8_to_f32jPKhPf .addrsig_sym _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14cast_u8_to_f32jPKhPf .addrsig_sym _Z18cast_u8x4_to_f32x4jPKhPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z18cast_u8x4_to_f32x4jPKhPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */ /* 0x001fca00078e0203 */ /*0050*/ IADD3 R0, R2, 0x4, RZ ; /* 0x0000000402007810 */ /* 0x000fc80007ffe0ff */ /*0060*/ ISETP.GT.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */ /* 0x000fda0003f04070 */ /*0070*/ @P0 BRA 0x140 ; /* 0x000000c000000947 */ /* 0x000fea0003800000 */ /*0080*/ SHF.R.U32.HI R2, RZ, 0x2, R2 ; /* 0x00000002ff027819 */ /* 0x000fc80000011602 */ /*0090*/ LEA R4, P0, R2, c[0x0][0x168], 0x2 ; /* 0x00005a0002047a11 */ /* 0x000fca00078010ff */ /*00a0*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff057624 */ /* 0x000fca00000e06ff */ /*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */ /* 0x000fd400000001ff */ /*00d0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*00e0*/ I2F.U8 R11, R4.B3 ; /* 0x30000004000b7306 */ /* 0x004ff00000001000 */ /*00f0*/ I2F.U8 R10, R4.B2 ; /* 0x20000004000a7306 */ /* 0x000ff00000001000 */ /*0100*/ I2F.U8 R9, R4.B1 ; /* 0x1000000400097306 */ /* 0x000ff00000001000 */ /*0110*/ I2F.U8 R8, R4 ; /* 0x0000000400087306 */ /* 0x000e240000001000 */ /*0120*/ STG.E.128 [R2.64], R8 ; /* 0x0000000802007986 */ /* 0x001fe2000c101d04 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 
*/ /*0150*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0160*/ IADD3 R4, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x000fca0007f1e0ff */ /*0170*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff057624 */ /* 0x000fca00000e06ff */ /*0180*/ LDG.E.U8 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1100 */ /*0190*/ MOV R3, 0x4 ; /* 0x0000000400037802 */ /* 0x000fca0000000f00 */ /*01a0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*01b0*/ I2F.U16 R7, R4 ; /* 0x0000000400077306 */ /* 0x004e280000101000 */ /*01c0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z14cast_u8_to_f32jPKhPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e280000002100 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0030*/ IMAD R2, R3, c[0x0][0x0], R2 ; /* 0x0000000003027a24 */ /* 0x001fca00078e0202 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */ /* 0x000fda0003f06070 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IADD3 R4, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */ /* 0x000fe20007f1e0ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IADD3.X R5, RZ, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00ff057a10 */ /* 0x000fca00007fe4ff */ /*0090*/ LDG.E.U8 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1100 */ /*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fd400000001ff */ /*00b0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fe200078e0003 */ /*00c0*/ I2F.U16 R7, R4 ; /* 0x0000000400077306 */ /* 0x004e280000101000 */ /*00d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14cast_u8_to_f32jPKhPf .globl _Z14cast_u8_to_f32jPKhPf .p2align 8 .type _Z14cast_u8_to_f32jPKhPf,@function _Z14cast_u8_to_f32jPKhPf: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_u32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x8 v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) global_load_u8 v3, v1, s[0:1] v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14cast_u8_to_f32jPKhPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 
.amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14cast_u8_to_f32jPKhPf, .Lfunc_end0-_Z14cast_u8_to_f32jPKhPf .section .AMDGPU.csdata,"",@progbits .text .protected _Z18cast_u8x4_to_f32x4jPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .p2align 8 .type _Z18cast_u8x4_to_f32x4jPKhPf,@function _Z18cast_u8x4_to_f32x4jPKhPf: s_clause 0x2 s_load_b32 s5, s[0:1], 0x24 s_load_b32 s4, s[0:1], 0x0 s_load_b128 s[0:3], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s5, s5, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1] s_mov_b32 s5, exec_lo v_add_nc_u32_e32 v0, 4, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_ge_u32_e64 s4, v0 s_xor_b32 s5, exec_lo, s5 s_cbranch_execz .LBB1_2 v_and_b32_e32 v0, -4, v1 s_clause 0x3 global_load_u8 v2, v0, s[0:1] global_load_u8 v3, v0, s[0:1] offset:1 global_load_u8 v6, v0, s[0:1] offset:2 global_load_u8 v7, v0, s[0:1] offset:3 v_lshrrev_b32_e32 v0, 2, v1 v_mov_b32_e32 v1, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 4, v[0:1] v_add_co_u32 v4, vcc_lo, s2, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo s_waitcnt vmcnt(3) v_cvt_f32_ubyte0_e32 v0, v2 s_waitcnt vmcnt(2) v_cvt_f32_ubyte0_e32 v1, v3 s_waitcnt vmcnt(1) v_cvt_f32_ubyte0_e32 v2, v6 s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v3, v7 global_store_b128 v[4:5], v[0:3], off .LBB1_2: s_and_not1_saveexec_b32 s5, s5 s_cbranch_execz .LBB1_5 v_cmp_gt_u32_e32 vcc_lo, s4, v1 s_and_saveexec_b32 s4, vcc_lo s_cbranch_execz .LBB1_5 global_load_u8 v3, v1, s[0:1] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v2, v3 global_store_b32 v[0:1], v2, off .LBB1_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18cast_u8x4_to_f32x4jPKhPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z18cast_u8x4_to_f32x4jPKhPf, .Lfunc_end1-_Z18cast_u8x4_to_f32x4jPKhPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
__hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14cast_u8_to_f32jPKhPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14cast_u8_to_f32jPKhPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: 
hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18cast_u8x4_to_f32x4jPKhPf .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18cast_u8x4_to_f32x4jPKhPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0012f351_00000000-6_transform.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf .type _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf, @function _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf: .LFB2053: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14cast_u8_to_f32jPKhPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2053: .size _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf, .-_Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf .globl _Z14cast_u8_to_f32jPKhPf .type _Z14cast_u8_to_f32jPKhPf, @function _Z14cast_u8_to_f32jPKhPf: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf addq $8, 
%rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _Z14cast_u8_to_f32jPKhPf, .-_Z14cast_u8_to_f32jPKhPf .globl arraydiff_cuda_kernel_cast_u8_to_f32 .type arraydiff_cuda_kernel_cast_u8_to_f32, @function arraydiff_cuda_kernel_cast_u8_to_f32: .LFB2027: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $32, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r12 movq %rcx, %r9 movl $1024, 20(%rsp) movl $1, 24(%rsp) leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movq %r12, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z38__device_stub__Z14cast_u8_to_f32jPKhPfjPKhPf jmp .L11 .cfi_endproc .LFE2027: .size arraydiff_cuda_kernel_cast_u8_to_f32, .-arraydiff_cuda_kernel_cast_u8_to_f32 .globl _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf .type _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf, @function _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf: .LFB2055: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L19 .L15: movq 120(%rsp), %rax subq %fs:40, %rax jne 
.L20 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18cast_u8x4_to_f32x4jPKhPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L15 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2055: .size _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf, .-_Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .type _Z18cast_u8x4_to_f32x4jPKhPf, @function _Z18cast_u8x4_to_f32x4jPKhPf: .LFB2056: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2056: .size _Z18cast_u8x4_to_f32x4jPKhPf, .-_Z18cast_u8x4_to_f32x4jPKhPf .globl arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .type arraydiff_cuda_kernel_cast_u8x4_to_f32x4, @function arraydiff_cuda_kernel_cast_u8x4_to_f32x4: .LFB2028: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $32, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbx movq %rsi, %rbp movq %rdx, %r12 movq %rcx, %r9 movl $1024, 20(%rsp) movl $1, 24(%rsp) leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L26 .L23: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state movq %r12, %rdx movq %rbp, %rsi movl %ebx, %edi call _Z42__device_stub__Z18cast_u8x4_to_f32x4jPKhPfjPKhPf jmp .L23 .cfi_endproc .LFE2028: .size 
arraydiff_cuda_kernel_cast_u8x4_to_f32x4, .-arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z18cast_u8x4_to_f32x4jPKhPf" .LC1: .string "_Z14cast_u8_to_f32jPKhPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z18cast_u8x4_to_f32x4jPKhPf(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z14cast_u8_to_f32jPKhPf(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 
3: .align 8 4:
.text .file "transform.hip" .globl _Z29__device_stub__cast_u8_to_f32jPKhPf # -- Begin function _Z29__device_stub__cast_u8_to_f32jPKhPf .p2align 4, 0x90 .type _Z29__device_stub__cast_u8_to_f32jPKhPf,@function _Z29__device_stub__cast_u8_to_f32jPKhPf: # @_Z29__device_stub__cast_u8_to_f32jPKhPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14cast_u8_to_f32jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z29__device_stub__cast_u8_to_f32jPKhPf, .Lfunc_end0-_Z29__device_stub__cast_u8_to_f32jPKhPf .cfi_endproc # -- End function .globl arraydiff_cuda_kernel_cast_u8_to_f32 # -- Begin function arraydiff_cuda_kernel_cast_u8_to_f32 .p2align 4, 0x90 .type arraydiff_cuda_kernel_cast_u8_to_f32,@function arraydiff_cuda_kernel_cast_u8_to_f32: # @arraydiff_cuda_kernel_cast_u8_to_f32 .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r9 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $1024, %rdx # imm = 0x400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movl %r15d, 12(%rsp) movq %r14, 72(%rsp) movq %rbx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), 
%rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14cast_u8_to_f32jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size arraydiff_cuda_kernel_cast_u8_to_f32, .Lfunc_end1-arraydiff_cuda_kernel_cast_u8_to_f32 .cfi_endproc # -- End function .globl _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf # -- Begin function _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .p2align 4, 0x90 .type _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf,@function _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf: # @_Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movl %edi, 12(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18cast_u8x4_to_f32x4jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf, .Lfunc_end2-_Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .cfi_endproc # -- End function .globl arraydiff_cuda_kernel_cast_u8x4_to_f32x4 # -- Begin function arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .p2align 4, 0x90 .type 
arraydiff_cuda_kernel_cast_u8x4_to_f32x4,@function arraydiff_cuda_kernel_cast_u8x4_to_f32x4: # @arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %r9 movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 leaq 1023(%rdi), %rax shrq $10, %rax movl %eax, %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $1024, %rdx # imm = 0x400 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: movl %r15d, 12(%rsp) movq %r14, 72(%rsp) movq %rbx, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18cast_u8x4_to_f32x4jPKhPf, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size arraydiff_cuda_kernel_cast_u8x4_to_f32x4, .Lfunc_end3-arraydiff_cuda_kernel_cast_u8x4_to_f32x4 .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq 
__hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14cast_u8_to_f32jPKhPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18cast_u8x4_to_f32x4jPKhPf, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14cast_u8_to_f32jPKhPf,@object # @_Z14cast_u8_to_f32jPKhPf .section .rodata,"a",@progbits .globl _Z14cast_u8_to_f32jPKhPf .p2align 3, 0x0 _Z14cast_u8_to_f32jPKhPf: .quad _Z29__device_stub__cast_u8_to_f32jPKhPf .size _Z14cast_u8_to_f32jPKhPf, 8 .type _Z18cast_u8x4_to_f32x4jPKhPf,@object # @_Z18cast_u8x4_to_f32x4jPKhPf .globl _Z18cast_u8x4_to_f32x4jPKhPf .p2align 3, 0x0 _Z18cast_u8x4_to_f32x4jPKhPf: .quad _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .size _Z18cast_u8x4_to_f32x4jPKhPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14cast_u8_to_f32jPKhPf" .size .L__unnamed_1, 25 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z18cast_u8x4_to_f32x4jPKhPf" .size .L__unnamed_2, 29 .type __hip_fatbin_wrapper,@object # 
@__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__cast_u8_to_f32jPKhPf .addrsig_sym _Z33__device_stub__cast_u8x4_to_f32x4jPKhPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14cast_u8_to_f32jPKhPf .addrsig_sym _Z18cast_u8x4_to_f32x4jPKhPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <fstream> #include <string> #include <vector> #include <algorithm> #include <cstdlib> #include <cmath> #include <ctime> #include <time.h> #include <stdint.h> #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; __global__ void gpu_multiplication_reduce(int *first, int *subarrayN, int *N, double* vectorIN, bool* vectorBIN, double *d_out) { //index size_t index = threadIdx.x + blockIdx.x * blockDim.x + (blockDim.x * blockIdx.x); int tid = threadIdx.x; int dim = *N; int offset = *subarrayN / 2; if (*first == 1 && tid < offset && index + offset < dim) { if (vectorBIN[index]) { if (vectorBIN[index + offset]) { vectorIN[index] *= vectorIN[index + offset]; } else { vectorIN[index] *= (1 - vectorIN[index + offset]); } } else if (vectorBIN[index + offset]) { vectorIN[index] = (1 - vectorIN[index]) * vectorIN[index + offset]; } else { vectorIN[index] = (1 - vectorIN[index]) * (1 - vectorIN[index + offset]); } offset = offset / 2; } __syncthreads(); for (int i = offset; i > 0; i >>= 1) { if (tid < i && index + i < dim) { vectorIN[index] *= vectorIN[index + i]; } __syncthreads(); } if (tid == 0) { d_out[blockIdx.x] = vectorIN[index]; printf("total is %f\n", d_out[blockIdx.x]); } } fstream spam_file; fstream nonspam_file; vector<string> words; vector<int> spam_occ; vector<int> nonspam_occ; double * spam_vector; double * nonspam_vector; bool * binary_vector; // m int spam_length; // n int nonspam_length; void print_words_and_occurencies() { for (size_t i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_occ[i] << " " << nonspam_occ[i] << endl; } } void print_vectors() { cout << "spam_length: " << spam_length << ", nonspamlength: " << nonspam_length << endl; cout << "word_vector" << " " << "spam_vector" << " " << "nonspam_vector" << endl; for (int i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_vector[i] << " " << nonspam_vector[i] << endl; } } void finish_vectors() { 
spam_vector = new double[words.size()]; nonspam_vector = new double[words.size()]; for (int i = 0; i < words.size(); i++) { spam_vector[i] = (double) spam_occ[i] / (double) spam_length; nonspam_vector[i] = (double) nonspam_occ[i] / (double) nonspam_length; } } void count_occurencies_length() { string line; int counter = -1; while (!spam_file.eof()) { counter++; getline(spam_file, line); } spam_length = counter; counter = -1; while (!nonspam_file.eof()) { counter++; getline(nonspam_file, line); } nonspam_length = counter; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_spam_training_examples() { cout << "Add spam examples to the database (\"quit\" to exit function.)" << endl; string spam_example = ""; cin.ignore(); do { getline(cin, spam_example); if (spam_example.compare("quit")) { spam_file << spam_example << endl; } else { break; } } while (spam_example.compare("quit")); spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_nonspam_training_examples() { cout << "Add nonspam examples to the database (\"quit\" to exit function.)" << endl; string nonspam_example = ""; cin.ignore(); do { getline(cin, nonspam_example); if (nonspam_example.compare("quit")) { nonspam_file << nonspam_example << endl; } else { break; } } while (nonspam_example.compare("quit")); cout << "haha" << endl; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void create_words_vector() { words.clear(); count_occurencies_length(); string word; string line; string separator = " "; size_t position; // spam_file-> words vector while (!spam_file.eof()) { getline(spam_file, line); position = line.find(separator); while (line.compare("")==1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position+1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) 
{ /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ.push_back(1); } if (position == string::npos) { break; } } } for (int i = 0; i < words.size(); i++) { nonspam_occ.push_back(0); } // nonspam_file-> words vector while (!nonspam_file.eof()) { getline(nonspam_file, line); position = line.find(separator); while (line.compare("") == 1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ.push_back(1); spam_occ.push_back(0); } if (position == string::npos) { break; } } } spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); finish_vectors(); } void run_cpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; double time = clock(); for (int i = 0; i < word_size; i++) { if (binary_vector[i]) { p_test_spam *= spam_vector[i]; p_test_nonspam *= nonspam_vector[i]; } else { p_test_spam *= (1.0-spam_vector[i]); p_test_nonspam *= (1.0 - nonspam_vector[i]); } } cout << "CPU time is " << clock() - time << endl; cout << "total (spam) = " << p_test_spam << endl; cout << "total (nonspam) = " << p_test_nonspam << endl; double p_spam = (double) spam_length / (double) (spam_length + nonspam_length); double p_nonspam = (double) nonspam_length / (double) (spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * 
p_nonspam + p_test_spam * p_spam); p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " << p_spam_test << endl; cout << "P(nonspam|test) = " << p_nonspam_test << endl; cout << "word count is" << words.size() << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_gpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; // ------------------------------- CUDA ----------------------- int dim = word_size; int blockSize = 1024; size_t max_elems_per_block = blockSize * 2; // due to binary tree nature of algorithm size_t blocks = 0; if (dim <= max_elems_per_block) { blocks = (size_t) ceil(float(dim) / float(max_elems_per_block)); } else { blocks = dim / max_elems_per_block; if (dim % max_elems_per_block != 0) blocks++; } cout << "Blocks: " << blocks << endl; size_t double_array_dim = dim * sizeof(double); size_t bool_array_dim = dim * sizeof(bool); size_t out_dim = blocks * sizeof(double); double *vector = (double *)malloc(double_array_dim); bool *vector_binary = (bool *)malloc(bool_array_dim); double *dev_array; bool *dev_binary; double *dev_out; double *dev_total; int *first; int *subN; int *N; cudaMalloc(&dev_array, double_array_dim); cudaMalloc(&dev_binary, bool_array_dim); cudaMalloc(&dev_out, out_dim); cudaMallocManaged(&dev_total, sizeof(double)); cudaMallocManaged(&subN, sizeof(int)); cudaMallocManaged(&N, sizeof(int)); cudaMallocManaged(&first, sizeof(int)); *dev_total = 1.0; *first = 1; *N = dim; *subN = dim / blocks; cout << "Subarray count is " << *subN << endl; // NONSPAM_VECTOR // HOST ----> DEVICE if (cudaMemcpy(dev_array, nonspam_vector, double_array_dim, cudaMemcpyHostToDevice) != 
cudaSuccess) { cout << "It could not be copied! 1" << endl; return; } if (cudaMemcpy(dev_binary, binary_vector, bool_array_dim, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "It could not be copied! 2" << endl; return; } cout << "Nonspam vector: " << endl; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); cudaDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); cudaDeviceSynchronize(); // the result is in the first element of the vector p_test_nonspam = dev_total[0]; // SPAM_VECTOR // HOST ----> DEVICE if (cudaMemcpy(dev_array, spam_vector, double_array_dim, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "It could not be copied! 4" << endl; return; } *first = 1; cout << "Spam vector: " << endl; *subN = dim / blocks; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); cudaDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); cudaDeviceSynchronize(); p_test_spam = dev_total[0]; free(vector); free(vector_binary); cudaFree(dev_array); cudaFree(dev_binary); cudaFree(dev_out); cudaFree(dev_total); cudaFree(first); // -------------------------------- CUDA END ------------------ double p_spam = (double)spam_length / (double)(spam_length + nonspam_length); double p_nonspam = (double)nonspam_length / (double)(spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " 
<< p_spam_test << endl; cout << "P(nonspam|test) = " << p_nonspam_test << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_naive_bayes(bool gpu, int word_size) { binary_vector = new bool[words.size()]; for (int i = 0; i < words.size(); i++) binary_vector[i] = false; string example; string word; string separator = " "; size_t position; cout << "Input test sample: "; cin.ignore(); getline(cin, example); int known_words = 0; int unknown_words = 0; while (example.compare("") == 1) { position = example.find(separator); word = example.substr(0, position); //cout << line << endl; example.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); binary_vector[pos] = true; known_words++; } else { /* words does not contain word */ unknown_words++; } if (position == string::npos) { break; } } cout << "known_words: " << known_words << ", unkwnown_words: " << unknown_words << endl; if (gpu) { run_gpu(word_size); } else { run_cpu(word_size); } } int main() { // Init bool perform = true; bool ready = false; spam_file.open("spam_data.txt", ios::in | ios::out | ios::app); nonspam_file.open("nonspam_data.txt", ios::in | ios::out | ios::app); if (spam_file.good()==false) { cout << "Couldnt open spam_file."; getchar(); return 0; } if (nonspam_file.good() == false) { cout << "Couldnt open nonspam_file."; getchar(); return 0; } cout << "Naive Bayes Classifier as a anti-spam filter" << endl; int option; int word_size; while (perform) { cout << endl << " ---- Choose action (number): ---- " << endl; cout << "1. Add spam training examples. " << endl; cout << "2. Add non spam training examples. " << endl; cout << "3. Init Naive Bayes Algorithm." << endl; cout << "4. Run Naive Bayes Classifier (GPU)." << endl; cout << "5. 
Run Naive Bayes Classifier (CPU)." << endl; cout << "6. Print words and their occurencies." << endl; cout << "7. Print words and vectors" << endl; cout << "8. Exit." << endl; cin >> option; switch (option) { case 1: add_spam_training_examples(); ready = false; break; case 2: add_nonspam_training_examples(); ready = false; break; case 3: create_words_vector(); ready = true; break; case 4: if (ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(true, word_size); } else { cout << "Naive Bayes not initialized!" << endl; } break; case 5: if(ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(false, word_size); } break; case 6: print_words_and_occurencies(); break; case 7: print_vectors(); break; case 8: perform = false; break; default: cout << "Wrong!" << endl; } } // close txt files spam_file.close(); nonspam_file.close(); // delete tabs delete [] spam_vector; delete [] nonspam_vector; return 0; }
code for sm_80 Function : _Z25gpu_multiplication_reducePiS_S_PdPbS0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */ /* 0x000fe200078e00ff */ /*0020*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */ /* 0x000fe20000000a00 */ /*0030*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */ /* 0x000fe200078e00ff */ /*0040*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */ /* 0x000fc80007ffe0ff */ /*0050*/ LDG.E R0, [R4.64] ; /* 0x0000002404007981 */ /* 0x000ea2000c1e1900 */ /*0060*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff0c7624 */ /* 0x000fe400078e00ff */ /*0070*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff0d7624 */ /* 0x000fe400078e00ff */ /*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff067624 */ /* 0x000fe400078e00ff */ /*0090*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff077624 */ /* 0x000fe200078e00ff */ /*00a0*/ LDG.E R2, [R12.64] ; /* 0x000000240c027981 */ /* 0x000ee8000c1e1900 */ /*00b0*/ LDG.E R6, [R6.64] ; /* 0x0000002406067981 */ /* 0x000f28000c1e1900 */ /*00c0*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e280000002100 */ /*00d0*/ S2R R10, SR_CTAID.X ; /* 0x00000000000a7919 */ /* 0x000e240000002500 */ /*00e0*/ IMAD R9, R10, c[0x0][0x0], R3 ; /* 0x000000000a097a24 */ /* 0x001fc800078e0203 */ /*00f0*/ IMAD R8, R10, c[0x0][0x0], R9 ; /* 0x000000000a087a24 */ /* 0x000fe200078e0209 */ /*0100*/ BSSY B0, 0x450 ; /* 0x0000034000007945 */ /* 0x000fe20003800000 */ /*0110*/ LEA.HI R11, R0, R0, RZ, 0x1 ; /* 0x00000000000b7211 */ /* 0x004fc800078f08ff */ /*0120*/ SHF.R.S32.HI R11, RZ, 0x1, R11 ; /* 0x00000001ff0b7819 */ /* 0x000fc8000001140b */ /*0130*/ IADD3 R5, P1, R8, R11.reuse, RZ ; /* 0x0000000b08057210 */ /* 0x080fe40007f3e0ff */ /*0140*/ 
SHF.R.S32.HI R9, RZ, 0x1f, R11 ; /* 0x0000001fff097819 */ /* 0x000fe4000001140b */ /*0150*/ ISETP.GE.AND P0, PT, R3, R11, PT ; /* 0x0000000b0300720c */ /* 0x000fe40003f06270 */ /*0160*/ ISETP.GE.U32.AND P2, PT, R5, R2, PT ; /* 0x000000020500720c */ /* 0x008fe20003f46070 */ /*0170*/ IMAD.X R5, RZ, RZ, R9, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0609 */ /*0180*/ ISETP.NE.OR P0, PT, R6, 0x1, P0 ; /* 0x000000010600780c */ /* 0x010fe40000705670 */ /*0190*/ SHF.R.S32.HI R4, RZ, 0x1f, R2 ; /* 0x0000001fff047819 */ /* 0x000fc80000011402 */ /*01a0*/ ISETP.GE.U32.OR.EX P0, PT, R5, R4, P0, P2 ; /* 0x000000040500720c */ /* 0x000fda0000706520 */ /*01b0*/ @P0 BRA 0x440 ; /* 0x0000028000000947 */ /* 0x000fea0003800000 */ /*01c0*/ IADD3 R6, P0, R8, c[0x0][0x180], RZ ; /* 0x0000600008067a10 */ /* 0x000fca0007f1e0ff */ /*01d0*/ IMAD.X R7, RZ, RZ, c[0x0][0x184], P0 ; /* 0x00006100ff077624 */ /* 0x000fca00000e06ff */ /*01e0*/ LDG.E.U8 R13, [R6.64] ; /* 0x00000024060d7981 */ /* 0x000ea2000c1e1100 */ /*01f0*/ IADD3 R14, P0, R11, R6, RZ ; /* 0x000000060b0e7210 */ /* 0x000fca0007f1e0ff */ /*0200*/ IMAD.X R15, R9, 0x1, R7, P0 ; /* 0x00000001090f7824 */ /* 0x000fca00000e0607 */ /*0210*/ LDG.E.U8 R5, [R14.64] ; /* 0x000000240e057981 */ /* 0x000162000c1e1100 */ /*0220*/ LEA R12, P0, R8.reuse, c[0x0][0x178], 0x3 ; /* 0x00005e00080c7a11 */ /* 0x040fe200078018ff */ /*0230*/ BSSY B1, 0x410 ; /* 0x000001d000017945 */ /* 0x000fe60003800000 */ /*0240*/ LEA.HI.X R16, R8, c[0x0][0x17c], RZ, 0x3, P0 ; /* 0x00005f0008107a11 */ /* 0x000fe400000f1cff */ /*0250*/ LEA R12, P1, R11, R12, 0x3 ; /* 0x0000000c0b0c7211 */ /* 0x000fe400078218ff */ /*0260*/ ISETP.NE.AND P0, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */ /* 0x004fe40003f05270 */ /*0270*/ LEA.HI.X R13, R11, R16, R9, 0x3, P1 ; /* 0x000000100b0d7211 */ /* 0x000fd600008f1c09 */ /*0280*/ @!P0 BRA 0x350 ; /* 0x000000c000008947 */ /* 0x000fea0003800000 */ /*0290*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x021fe20003f05270 */ 
/*02a0*/ IMAD.MOV.U32 R15, RZ, RZ, 0x8 ; /* 0x00000008ff0f7424 */ /* 0x000fe200078e00ff */ /*02b0*/ LDG.E.64 R12, [R12.64] ; /* 0x000000240c0c7981 */ /* 0x000ea6000c1e1b00 */ /*02c0*/ IMAD.WIDE.U32 R14, R8, R15, c[0x0][0x178] ; /* 0x00005e00080e7625 */ /* 0x000fd000078e000f */ /*02d0*/ @!P0 LDG.E.64 R16, [R14.64] ; /* 0x000000240e108981 */ /* 0x000ee2000c1e1b00 */ /*02e0*/ @!P0 DADD R6, -R12, 1 ; /* 0x3ff000000c068429 */ /* 0x004ecc0000000100 */ /*02f0*/ @!P0 DMUL R16, R6, R16 ; /* 0x0000001006108228 */ /* 0x008e0e0000000000 */ /*0300*/ @!P0 STG.E.64 [R14.64], R16 ; /* 0x000000100e008986 */ /* 0x0011e8000c101b24 */ /*0310*/ @P0 LDG.E.64 R6, [R14.64] ; /* 0x000000240e060981 */ /* 0x000ea4000c1e1b00 */ /*0320*/ @P0 DMUL R6, R12, R6 ; /* 0x000000060c060228 */ /* 0x004e4e0000000000 */ /*0330*/ @P0 STG.E.64 [R14.64], R6 ; /* 0x000000060e000986 */ /* 0x0021e2000c101b24 */ /*0340*/ BRA 0x400 ; /* 0x000000b000007947 */ /* 0x000fea0003800000 */ /*0350*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */ /* 0x001fe200078e00ff */ /*0360*/ LDG.E.64 R12, [R12.64] ; /* 0x000000240c0c7981 */ /* 0x000ea6000c1e1b00 */ /*0370*/ IMAD.WIDE.U32 R6, R8, R7, c[0x0][0x178] ; /* 0x00005e0008067625 */ /* 0x000fca00078e0007 */ /*0380*/ LDG.E.64 R14, [R6.64] ; /* 0x00000024060e7981 */ /* 0x000ee2000c1e1b00 */ /*0390*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x020fda0003f05270 */ /*03a0*/ @!P0 DADD R16, -R12, 1 ; /* 0x3ff000000c108429 */ /* 0x004fc80000000100 */ /*03b0*/ DADD R14, -R14, 1 ; /* 0x3ff000000e0e7429 */ /* 0x008e0c0000000100 */ /*03c0*/ @!P0 DMUL R16, R14, R16 ; /* 0x000000100e108228 */ /* 0x001e080000000000 */ /*03d0*/ @P0 DMUL R14, R14, R12 ; /* 0x0000000c0e0e0228 */ /* 0x000e460000000000 */ /*03e0*/ @!P0 STG.E.64 [R6.64], R16 ; /* 0x0000001006008986 */ /* 0x0011e8000c101b24 */ /*03f0*/ @P0 STG.E.64 [R6.64], R14 ; /* 0x0000000e06000986 */ /* 0x0021e8000c101b24 */ /*0400*/ BSYNC B1 ; /* 0x0000000000017941 */ /* 0x000fea0003800000 */ /*0410*/ SHF.R.S32.HI 
R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */ /* 0x000fc80000011400 */ /*0420*/ LEA.HI R5, R5, R0, RZ, 0x2 ; /* 0x0000000005057211 */ /* 0x000fc800078f10ff */ /*0430*/ SHF.R.S32.HI R11, RZ, 0x2, R5 ; /* 0x00000002ff0b7819 */ /* 0x000fe40000011405 */ /*0440*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0450*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0460*/ ISETP.GE.AND P0, PT, R11, 0x1, PT ; /* 0x000000010b00780c */ /* 0x000fda0003f06270 */ /*0470*/ @!P0 BRA 0x5e0 ; /* 0x0000016000008947 */ /* 0x000fea0003800000 */ /*0480*/ LEA R16, P0, R8, c[0x0][0x178], 0x3 ; /* 0x00005e0008107a11 */ /* 0x001fc800078018ff */ /*0490*/ LEA.HI.X R9, R8, c[0x0][0x17c], RZ, 0x3, P0 ; /* 0x00005f0008097a11 */ /* 0x000fe400000f1cff */ /*04a0*/ IADD3 R5, P1, R8, R11, RZ ; /* 0x0000000b08057210 */ /* 0x000fe40007f3e0ff */ /*04b0*/ SHF.R.S32.HI R0, RZ, 0x1f, R11 ; /* 0x0000001fff007819 */ /* 0x000fe4000001140b */ /*04c0*/ ISETP.GE.U32.AND P0, PT, R5, R2, PT ; /* 0x000000020500720c */ /* 0x000fc60003f06070 */ /*04d0*/ IMAD.X R5, RZ, RZ, R0, P1 ; /* 0x000000ffff057224 */ /* 0x000fca00008e0600 */ /*04e0*/ ISETP.GE.U32.AND.EX P0, PT, R5, R4, PT, P0 ; /* 0x000000040500720c */ /* 0x000fc80003f06100 */ /*04f0*/ ISETP.GE.OR P0, PT, R3, R11, P0 ; /* 0x0000000b0300720c */ /* 0x000fda0000706670 */ /*0500*/ @P0 BRA 0x590 ; /* 0x0000008000000947 */ /* 0x001fea0003800000 */ /*0510*/ LEA R12, P0, R11, R16, 0x3 ; /* 0x000000100b0c7211 */ /* 0x000fe200078018ff */ /*0520*/ IMAD.MOV.U32 R15, RZ, RZ, 0x8 ; /* 0x00000008ff0f7424 */ /* 0x000fc600078e00ff */ /*0530*/ LEA.HI.X R13, R11, R9, R0, 0x3, P0 ; /* 0x000000090b0d7211 */ /* 0x000fe200000f1c00 */ /*0540*/ IMAD.WIDE.U32 R14, R8, R15, c[0x0][0x178] ; /* 0x00005e00080e7625 */ /* 0x000fca00078e000f */ /*0550*/ LDG.E.64 R6, [R14.64] ; /* 0x000000240e067981 */ /* 0x000ea8000c1e1b00 */ /*0560*/ LDG.E.64 R12, [R12.64] ; /* 0x000000240c0c7981 */ /* 0x000ea4000c1e1b00 */ /*0570*/ DMUL R6, R6, R12 ; /* 
0x0000000c06067228 */ /* 0x004e0e0000000000 */ /*0580*/ STG.E.64 [R14.64], R6 ; /* 0x000000060e007986 */ /* 0x0011e8000c101b24 */ /*0590*/ SHF.R.U32.HI R11, RZ, 0x1, R11 ; /* 0x00000001ff0b7819 */ /* 0x000fe2000001160b */ /*05a0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*05b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe40000010000 */ /*05c0*/ ISETP.NE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fda0003f05270 */ /*05d0*/ @P0 BRA 0x4a0 ; /* 0xfffffec000000947 */ /* 0x000fea000383ffff */ /*05e0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */ /* 0x000fe40003f05270 */ /*05f0*/ IADD3 R6, P1, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x001fd60007f3e0ff */ /*0600*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0610*/ IMAD.MOV.U32 R11, RZ, RZ, 0x8 ; /* 0x00000008ff0b7424 */ /* 0x000fc800078e00ff */ /*0620*/ IMAD.WIDE.U32 R8, R8, R11, c[0x0][0x178] ; /* 0x00005e0008087625 */ /* 0x000fcc00078e000b */ /*0630*/ LDG.E.64 R8, [R8.64] ; /* 0x0000002408087981 */ /* 0x000ea2000c1e1b00 */ /*0640*/ MOV R2, 0x0 ; /* 0x0000000000027802 */ /* 0x000fe20000000f00 */ /*0650*/ IMAD.WIDE.U32 R10, R10, R11, c[0x0][0x188] ; /* 0x000062000a0a7625 */ /* 0x000fc800078e000b */ /*0660*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P1 ; /* 0x00000900ff077624 */ /* 0x000fe200008e06ff */ /*0670*/ LDC.64 R2, c[0x4][R2] ; /* 0x0100000002027b82 */ /* 0x000e220000000a00 */ /*0680*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe400078e00ff */ /*0690*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*06a0*/ STG.E.64 [R10.64], R8 ; /* 0x000000080a007986 */ /* 0x0043e8000c101b24 */ /*06b0*/ STL.64 [R1], R8 ; /* 0x0000000801007387 */ /* 0x0003e40000100a00 */ /*06c0*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x003fe20000000000 */ /*06d0*/ MOV R11, 0x740 ; /* 0x00000740000b7802 */ /* 0x000fc40000000f00 */ /*06e0*/ MOV R20, 0x6c0 ; /* 
0x000006c000147802 */ /* 0x000fe40000000f00 */ /*06f0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0700*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*0710*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*0720*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0730*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x000fea0003c00000 */ /*0740*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0750*/ BRA 0x750; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0760*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0770*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0780*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0790*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <fstream> #include <string> #include <vector> #include <algorithm> #include <cstdlib> #include <cmath> #include <ctime> #include <time.h> #include <stdint.h> #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; __global__ void gpu_multiplication_reduce(int *first, int *subarrayN, int *N, double* vectorIN, bool* vectorBIN, double *d_out) { //index size_t index = threadIdx.x + blockIdx.x * blockDim.x + (blockDim.x * blockIdx.x); int tid = threadIdx.x; int dim = *N; int offset = *subarrayN / 2; if (*first == 1 && tid < offset && index + offset < dim) { if (vectorBIN[index]) { if (vectorBIN[index + offset]) { vectorIN[index] *= vectorIN[index + offset]; } else { vectorIN[index] *= (1 - vectorIN[index + offset]); } } else if (vectorBIN[index + offset]) { vectorIN[index] = (1 - vectorIN[index]) * vectorIN[index + offset]; } else { vectorIN[index] = (1 - vectorIN[index]) * (1 - vectorIN[index + offset]); } offset = offset / 2; } __syncthreads(); for (int i = offset; i > 0; i >>= 1) { if (tid < i && index + i < dim) { vectorIN[index] *= vectorIN[index + i]; } __syncthreads(); } if (tid == 0) { d_out[blockIdx.x] = vectorIN[index]; printf("total is %f\n", d_out[blockIdx.x]); } } fstream spam_file; fstream nonspam_file; vector<string> words; vector<int> spam_occ; vector<int> nonspam_occ; double * spam_vector; double * nonspam_vector; bool * binary_vector; // m int spam_length; // n int nonspam_length; void print_words_and_occurencies() { for (size_t i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_occ[i] << " " << nonspam_occ[i] << endl; } } void print_vectors() { cout << "spam_length: " << spam_length << ", nonspamlength: " << nonspam_length << endl; cout << "word_vector" << " " << "spam_vector" << " " << "nonspam_vector" << endl; for (int i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_vector[i] << " " << nonspam_vector[i] << endl; } } void finish_vectors() { 
spam_vector = new double[words.size()]; nonspam_vector = new double[words.size()]; for (int i = 0; i < words.size(); i++) { spam_vector[i] = (double) spam_occ[i] / (double) spam_length; nonspam_vector[i] = (double) nonspam_occ[i] / (double) nonspam_length; } } void count_occurencies_length() { string line; int counter = -1; while (!spam_file.eof()) { counter++; getline(spam_file, line); } spam_length = counter; counter = -1; while (!nonspam_file.eof()) { counter++; getline(nonspam_file, line); } nonspam_length = counter; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_spam_training_examples() { cout << "Add spam examples to the database (\"quit\" to exit function.)" << endl; string spam_example = ""; cin.ignore(); do { getline(cin, spam_example); if (spam_example.compare("quit")) { spam_file << spam_example << endl; } else { break; } } while (spam_example.compare("quit")); spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_nonspam_training_examples() { cout << "Add nonspam examples to the database (\"quit\" to exit function.)" << endl; string nonspam_example = ""; cin.ignore(); do { getline(cin, nonspam_example); if (nonspam_example.compare("quit")) { nonspam_file << nonspam_example << endl; } else { break; } } while (nonspam_example.compare("quit")); cout << "haha" << endl; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void create_words_vector() { words.clear(); count_occurencies_length(); string word; string line; string separator = " "; size_t position; // spam_file-> words vector while (!spam_file.eof()) { getline(spam_file, line); position = line.find(separator); while (line.compare("")==1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position+1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) 
{ /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ.push_back(1); } if (position == string::npos) { break; } } } for (int i = 0; i < words.size(); i++) { nonspam_occ.push_back(0); } // nonspam_file-> words vector while (!nonspam_file.eof()) { getline(nonspam_file, line); position = line.find(separator); while (line.compare("") == 1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ.push_back(1); spam_occ.push_back(0); } if (position == string::npos) { break; } } } spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); finish_vectors(); } void run_cpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; double time = clock(); for (int i = 0; i < word_size; i++) { if (binary_vector[i]) { p_test_spam *= spam_vector[i]; p_test_nonspam *= nonspam_vector[i]; } else { p_test_spam *= (1.0-spam_vector[i]); p_test_nonspam *= (1.0 - nonspam_vector[i]); } } cout << "CPU time is " << clock() - time << endl; cout << "total (spam) = " << p_test_spam << endl; cout << "total (nonspam) = " << p_test_nonspam << endl; double p_spam = (double) spam_length / (double) (spam_length + nonspam_length); double p_nonspam = (double) nonspam_length / (double) (spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * 
p_nonspam + p_test_spam * p_spam); p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " << p_spam_test << endl; cout << "P(nonspam|test) = " << p_nonspam_test << endl; cout << "word count is" << words.size() << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_gpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; // ------------------------------- CUDA ----------------------- int dim = word_size; int blockSize = 1024; size_t max_elems_per_block = blockSize * 2; // due to binary tree nature of algorithm size_t blocks = 0; if (dim <= max_elems_per_block) { blocks = (size_t) ceil(float(dim) / float(max_elems_per_block)); } else { blocks = dim / max_elems_per_block; if (dim % max_elems_per_block != 0) blocks++; } cout << "Blocks: " << blocks << endl; size_t double_array_dim = dim * sizeof(double); size_t bool_array_dim = dim * sizeof(bool); size_t out_dim = blocks * sizeof(double); double *vector = (double *)malloc(double_array_dim); bool *vector_binary = (bool *)malloc(bool_array_dim); double *dev_array; bool *dev_binary; double *dev_out; double *dev_total; int *first; int *subN; int *N; cudaMalloc(&dev_array, double_array_dim); cudaMalloc(&dev_binary, bool_array_dim); cudaMalloc(&dev_out, out_dim); cudaMallocManaged(&dev_total, sizeof(double)); cudaMallocManaged(&subN, sizeof(int)); cudaMallocManaged(&N, sizeof(int)); cudaMallocManaged(&first, sizeof(int)); *dev_total = 1.0; *first = 1; *N = dim; *subN = dim / blocks; cout << "Subarray count is " << *subN << endl; // NONSPAM_VECTOR // HOST ----> DEVICE if (cudaMemcpy(dev_array, nonspam_vector, double_array_dim, cudaMemcpyHostToDevice) != 
cudaSuccess) { cout << "It could not be copied! 1" << endl; return; } if (cudaMemcpy(dev_binary, binary_vector, bool_array_dim, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "It could not be copied! 2" << endl; return; } cout << "Nonspam vector: " << endl; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); cudaDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); cudaDeviceSynchronize(); // the result is in the first element of the vector p_test_nonspam = dev_total[0]; // SPAM_VECTOR // HOST ----> DEVICE if (cudaMemcpy(dev_array, spam_vector, double_array_dim, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "It could not be copied! 4" << endl; return; } *first = 1; cout << "Spam vector: " << endl; *subN = dim / blocks; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); cudaDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); cudaDeviceSynchronize(); p_test_spam = dev_total[0]; free(vector); free(vector_binary); cudaFree(dev_array); cudaFree(dev_binary); cudaFree(dev_out); cudaFree(dev_total); cudaFree(first); // -------------------------------- CUDA END ------------------ double p_spam = (double)spam_length / (double)(spam_length + nonspam_length); double p_nonspam = (double)nonspam_length / (double)(spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " 
<< p_spam_test << endl; cout << "P(nonspam|test) = " << p_nonspam_test << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_naive_bayes(bool gpu, int word_size) { binary_vector = new bool[words.size()]; for (int i = 0; i < words.size(); i++) binary_vector[i] = false; string example; string word; string separator = " "; size_t position; cout << "Input test sample: "; cin.ignore(); getline(cin, example); int known_words = 0; int unknown_words = 0; while (example.compare("") == 1) { position = example.find(separator); word = example.substr(0, position); //cout << line << endl; example.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); binary_vector[pos] = true; known_words++; } else { /* words does not contain word */ unknown_words++; } if (position == string::npos) { break; } } cout << "known_words: " << known_words << ", unkwnown_words: " << unknown_words << endl; if (gpu) { run_gpu(word_size); } else { run_cpu(word_size); } } int main() { // Init bool perform = true; bool ready = false; spam_file.open("spam_data.txt", ios::in | ios::out | ios::app); nonspam_file.open("nonspam_data.txt", ios::in | ios::out | ios::app); if (spam_file.good()==false) { cout << "Couldnt open spam_file."; getchar(); return 0; } if (nonspam_file.good() == false) { cout << "Couldnt open nonspam_file."; getchar(); return 0; } cout << "Naive Bayes Classifier as a anti-spam filter" << endl; int option; int word_size; while (perform) { cout << endl << " ---- Choose action (number): ---- " << endl; cout << "1. Add spam training examples. " << endl; cout << "2. Add non spam training examples. " << endl; cout << "3. Init Naive Bayes Algorithm." << endl; cout << "4. Run Naive Bayes Classifier (GPU)." << endl; cout << "5. 
Run Naive Bayes Classifier (CPU)." << endl; cout << "6. Print words and their occurencies." << endl; cout << "7. Print words and vectors" << endl; cout << "8. Exit." << endl; cin >> option; switch (option) { case 1: add_spam_training_examples(); ready = false; break; case 2: add_nonspam_training_examples(); ready = false; break; case 3: create_words_vector(); ready = true; break; case 4: if (ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(true, word_size); } else { cout << "Naive Bayes not initialized!" << endl; } break; case 5: if(ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(false, word_size); } break; case 6: print_words_and_occurencies(); break; case 7: print_vectors(); break; case 8: perform = false; break; default: cout << "Wrong!" << endl; } } // close txt files spam_file.close(); nonspam_file.close(); // delete tabs delete [] spam_vector; delete [] nonspam_vector; return 0; }
#include <iostream> #include <fstream> #include <string> #include <vector> #include <algorithm> #include <cstdlib> #include <cmath> #include <ctime> #include <time.h> #include <stdint.h> #include <hip/hip_runtime.h> using namespace std; __global__ void gpu_multiplication_reduce(int *first, int *subarrayN, int *N, double* vectorIN, bool* vectorBIN, double *d_out) { //index size_t index = threadIdx.x + blockIdx.x * blockDim.x + (blockDim.x * blockIdx.x); int tid = threadIdx.x; int dim = *N; int offset = *subarrayN / 2; if (*first == 1 && tid < offset && index + offset < dim) { if (vectorBIN[index]) { if (vectorBIN[index + offset]) { vectorIN[index] *= vectorIN[index + offset]; } else { vectorIN[index] *= (1 - vectorIN[index + offset]); } } else if (vectorBIN[index + offset]) { vectorIN[index] = (1 - vectorIN[index]) * vectorIN[index + offset]; } else { vectorIN[index] = (1 - vectorIN[index]) * (1 - vectorIN[index + offset]); } offset = offset / 2; } __syncthreads(); for (int i = offset; i > 0; i >>= 1) { if (tid < i && index + i < dim) { vectorIN[index] *= vectorIN[index + i]; } __syncthreads(); } if (tid == 0) { d_out[blockIdx.x] = vectorIN[index]; printf("total is %f\n", d_out[blockIdx.x]); } } fstream spam_file; fstream nonspam_file; vector<string> words; vector<int> spam_occ; vector<int> nonspam_occ; double * spam_vector; double * nonspam_vector; bool * binary_vector; // m int spam_length; // n int nonspam_length; void print_words_and_occurencies() { for (size_t i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_occ[i] << " " << nonspam_occ[i] << endl; } } void print_vectors() { cout << "spam_length: " << spam_length << ", nonspamlength: " << nonspam_length << endl; cout << "word_vector" << " " << "spam_vector" << " " << "nonspam_vector" << endl; for (int i = 0; i < words.size(); i++) { cout << words[i] << " " << spam_vector[i] << " " << nonspam_vector[i] << endl; } } void finish_vectors() { spam_vector = new double[words.size()]; nonspam_vector = 
new double[words.size()]; for (int i = 0; i < words.size(); i++) { spam_vector[i] = (double) spam_occ[i] / (double) spam_length; nonspam_vector[i] = (double) nonspam_occ[i] / (double) nonspam_length; } } void count_occurencies_length() { string line; int counter = -1; while (!spam_file.eof()) { counter++; getline(spam_file, line); } spam_length = counter; counter = -1; while (!nonspam_file.eof()) { counter++; getline(nonspam_file, line); } nonspam_length = counter; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_spam_training_examples() { cout << "Add spam examples to the database (\"quit\" to exit function.)" << endl; string spam_example = ""; cin.ignore(); do { getline(cin, spam_example); if (spam_example.compare("quit")) { spam_file << spam_example << endl; } else { break; } } while (spam_example.compare("quit")); spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void add_nonspam_training_examples() { cout << "Add nonspam examples to the database (\"quit\" to exit function.)" << endl; string nonspam_example = ""; cin.ignore(); do { getline(cin, nonspam_example); if (nonspam_example.compare("quit")) { nonspam_file << nonspam_example << endl; } else { break; } } while (nonspam_example.compare("quit")); cout << "haha" << endl; spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); } void create_words_vector() { words.clear(); count_occurencies_length(); string word; string line; string separator = " "; size_t position; // spam_file-> words vector while (!spam_file.eof()) { getline(spam_file, line); position = line.find(separator); while (line.compare("")==1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position+1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = 
distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); spam_occ.push_back(1); } if (position == string::npos) { break; } } } for (int i = 0; i < words.size(); i++) { nonspam_occ.push_back(0); } // nonspam_file-> words vector while (!nonspam_file.eof()) { getline(nonspam_file, line); position = line.find(separator); while (line.compare("") == 1) { position = line.find(separator); word = line.substr(0, position); line.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ[pos] += 1; } else { /* words does not contain word */ words.push_back(word); int pos = distance(words.begin(), find(words.begin(), words.end(), word)); nonspam_occ.push_back(1); spam_occ.push_back(0); } if (position == string::npos) { break; } } } spam_file.clear(); spam_file.seekg(0, ios::beg); nonspam_file.clear(); nonspam_file.seekg(0, ios::beg); finish_vectors(); } void run_cpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; double time = clock(); for (int i = 0; i < word_size; i++) { if (binary_vector[i]) { p_test_spam *= spam_vector[i]; p_test_nonspam *= nonspam_vector[i]; } else { p_test_spam *= (1.0-spam_vector[i]); p_test_nonspam *= (1.0 - nonspam_vector[i]); } } cout << "CPU time is " << clock() - time << endl; cout << "total (spam) = " << p_test_spam << endl; cout << "total (nonspam) = " << p_test_nonspam << endl; double p_spam = (double) spam_length / (double) (spam_length + nonspam_length); double p_nonspam = (double) nonspam_length / (double) (spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); 
p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " << p_spam_test << endl; cout << "P(nonspam|test) = " << p_nonspam_test << endl; cout << "word count is" << words.size() << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_gpu(int word_size) { double p_test_spam = 1.0; double p_test_nonspam = 1.0; // ------------------------------- CUDA ----------------------- int dim = word_size; int blockSize = 1024; size_t max_elems_per_block = blockSize * 2; // due to binary tree nature of algorithm size_t blocks = 0; if (dim <= max_elems_per_block) { blocks = (size_t) ceil(float(dim) / float(max_elems_per_block)); } else { blocks = dim / max_elems_per_block; if (dim % max_elems_per_block != 0) blocks++; } cout << "Blocks: " << blocks << endl; size_t double_array_dim = dim * sizeof(double); size_t bool_array_dim = dim * sizeof(bool); size_t out_dim = blocks * sizeof(double); double *vector = (double *)malloc(double_array_dim); bool *vector_binary = (bool *)malloc(bool_array_dim); double *dev_array; bool *dev_binary; double *dev_out; double *dev_total; int *first; int *subN; int *N; hipMalloc(&dev_array, double_array_dim); hipMalloc(&dev_binary, bool_array_dim); hipMalloc(&dev_out, out_dim); hipMallocManaged(&dev_total, sizeof(double)); hipMallocManaged(&subN, sizeof(int)); hipMallocManaged(&N, sizeof(int)); hipMallocManaged(&first, sizeof(int)); *dev_total = 1.0; *first = 1; *N = dim; *subN = dim / blocks; cout << "Subarray count is " << *subN << endl; // NONSPAM_VECTOR // HOST ----> DEVICE if (hipMemcpy(dev_array, nonspam_vector, double_array_dim, hipMemcpyHostToDevice) != hipSuccess) { cout << "It could not be copied! 
1" << endl; return; } if (hipMemcpy(dev_binary, binary_vector, bool_array_dim, hipMemcpyHostToDevice) != hipSuccess) { cout << "It could not be copied! 2" << endl; return; } cout << "Nonspam vector: " << endl; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); hipDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); hipDeviceSynchronize(); // the result is in the first element of the vector p_test_nonspam = dev_total[0]; // SPAM_VECTOR // HOST ----> DEVICE if (hipMemcpy(dev_array, spam_vector, double_array_dim, hipMemcpyHostToDevice) != hipSuccess) { cout << "It could not be copied! 4" << endl; return; } *first = 1; cout << "Spam vector: " << endl; *subN = dim / blocks; gpu_multiplication_reduce <<<blocks, blockSize >>>(first, subN, N, dev_array, dev_binary, dev_out); hipDeviceSynchronize(); *first = 0; *subN = blocks; gpu_multiplication_reduce <<< 1, blocks >>>(first, subN, N, dev_out, dev_binary, dev_total); hipDeviceSynchronize(); p_test_spam = dev_total[0]; free(vector); free(vector_binary); hipFree(dev_array); hipFree(dev_binary); hipFree(dev_out); hipFree(dev_total); hipFree(first); // -------------------------------- CUDA END ------------------ double p_spam = (double)spam_length / (double)(spam_length + nonspam_length); double p_nonspam = (double)nonspam_length / (double)(spam_length + nonspam_length); double p_spam_test = 0.0; double p_nonspam_test = 0.0; p_spam_test = (p_test_spam * p_spam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); p_nonspam_test = (p_test_nonspam * p_nonspam) / (p_test_nonspam * p_nonspam + p_test_spam * p_spam); cout << "P(spam) = " << p_spam << endl; cout << "P(nonspam) = " << p_nonspam << endl; cout << "P(test|spam) = " << p_test_spam << endl; cout << "P(test|nonspam) = " << p_test_nonspam << endl; cout << "P(spam|test) = " << p_spam_test << endl; cout << "P(nonspam|test) = " << 
p_nonspam_test << endl; if (p_spam_test > p_nonspam) { cout << "Message is a spam." << endl; } else { cout << "Message is not a spam." << endl; } } void run_naive_bayes(bool gpu, int word_size) { binary_vector = new bool[words.size()]; for (int i = 0; i < words.size(); i++) binary_vector[i] = false; string example; string word; string separator = " "; size_t position; cout << "Input test sample: "; cin.ignore(); getline(cin, example); int known_words = 0; int unknown_words = 0; while (example.compare("") == 1) { position = example.find(separator); word = example.substr(0, position); //cout << line << endl; example.erase(0, position + 1); // Add word to vector of words if (find(words.begin(), words.end(), word) != words.end()) { /* words contains word */ int pos = distance(words.begin(), find(words.begin(), words.end(), word)); binary_vector[pos] = true; known_words++; } else { /* words does not contain word */ unknown_words++; } if (position == string::npos) { break; } } cout << "known_words: " << known_words << ", unkwnown_words: " << unknown_words << endl; if (gpu) { run_gpu(word_size); } else { run_cpu(word_size); } } int main() { // Init bool perform = true; bool ready = false; spam_file.open("spam_data.txt", ios::in | ios::out | ios::app); nonspam_file.open("nonspam_data.txt", ios::in | ios::out | ios::app); if (spam_file.good()==false) { cout << "Couldnt open spam_file."; getchar(); return 0; } if (nonspam_file.good() == false) { cout << "Couldnt open nonspam_file."; getchar(); return 0; } cout << "Naive Bayes Classifier as a anti-spam filter" << endl; int option; int word_size; while (perform) { cout << endl << " ---- Choose action (number): ---- " << endl; cout << "1. Add spam training examples. " << endl; cout << "2. Add non spam training examples. " << endl; cout << "3. Init Naive Bayes Algorithm." << endl; cout << "4. Run Naive Bayes Classifier (GPU)." << endl; cout << "5. Run Naive Bayes Classifier (CPU)." << endl; cout << "6. 
Print words and their occurencies." << endl; cout << "7. Print words and vectors" << endl; cout << "8. Exit." << endl; cin >> option; switch (option) { case 1: add_spam_training_examples(); ready = false; break; case 2: add_nonspam_training_examples(); ready = false; break; case 3: create_words_vector(); ready = true; break; case 4: if (ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(true, word_size); } else { cout << "Naive Bayes not initialized!" << endl; } break; case 5: if(ready) { cout << "Chose the word size:" << endl; cin >> word_size; run_naive_bayes(false, word_size); } break; case 6: print_words_and_occurencies(); break; case 7: print_vectors(); break; case 8: perform = false; break; default: cout << "Wrong!" << endl; } } // close txt files spam_file.close(); nonspam_file.close(); // delete tabs delete [] spam_vector; delete [] nonspam_vector; return 0; }
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>

// Device kernel: every launched thread prints a greeting tagged with its
// thread index within the block.
__global__ void hellogpu (void)
{
    printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}

// Host entry point: greet from the CPU, launch 2 blocks of 10 threads,
// then reset the device (which also flushes the kernel's printf buffer).
int main(void)
{
    printf("Hello world from CPU!! \n");
    hellogpu <<<2, 10>>>();
    cudaDeviceReset ();
    return 0;
}
code for sm_80 Function : _Z8hellogpuv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */ /* 0x000e220000002100 */ /*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe40007f1e0ff */ /*0070*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0002a60000000a00 */ /*0080*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fe200000e06ff */ /*0090*/ STL [R1], R8 ; /* 0x0000000801007387 */ /* 0x0013e80000100800 */ /*00a0*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x002fc60000000000 */ /*00b0*/ MOV R11, 0x120 ; /* 0x00000120000b7802 */ /* 0x000fe40000000f00 */ /*00c0*/ MOV R20, 0xa0 ; /* 0x000000a000147802 */ /* 0x000fc40000000f00 */ /*00d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*00e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*00f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*0100*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0110*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x004fea0003c00000 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h>

// Device kernel: every launched thread prints a greeting tagged with its
// thread index within the block.
__global__ void hellogpu (void)
{
    printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}

// Host entry point: greet from the CPU, launch 2 blocks of 10 threads,
// then reset the device (which also flushes the kernel's printf buffer).
int main(void)
{
    printf("Hello world from CPU!! \n");
    hellogpu <<<2, 10>>>();
    cudaDeviceReset ();
    return 0;
}
.file "tmpxft_001260f1_00000000-6_hello.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z8hellogpuvv .type _Z26__device_stub__Z8hellogpuvv, @function _Z26__device_stub__Z8hellogpuvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z8hellogpuv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z8hellogpuvv, .-_Z26__device_stub__Z8hellogpuvv .globl _Z8hellogpuv .type _Z8hellogpuv, @function _Z8hellogpuv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z8hellogpuvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z8hellogpuv, .-_Z8hellogpuv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello world from CPU!! 
\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $10, 20(%rsp) movl $1, 24(%rsp) movl $2, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z26__device_stub__Z8hellogpuvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z8hellogpuv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8hellogpuv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" 
.align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h>

// Device kernel: every launched thread prints a greeting tagged with its
// thread index within the block.
__global__ void hellogpu (void)
{
    printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}

// Host entry point: greet from the CPU, launch 2 blocks of 10 threads,
// then reset the device (which also flushes the kernel's printf buffer).
int main(void)
{
    printf("Hello world from CPU!! \n");
    hellogpu <<<2, 10>>>();
    cudaDeviceReset ();
    return 0;
}
#include <hip/hip_runtime.h>
#include <stdio.h>

// Device kernel: every launched thread prints a greeting tagged with its
// thread index within the block.
__global__ void hellogpu (void)
{
    printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}

// Host entry point: greet from the CPU, launch 2 blocks of 10 threads,
// then reset the device via the HIP runtime.
int main(void)
{
    printf("Hello world from CPU!! \n");
    hellogpu <<<2, 10>>>();
    hipDeviceReset ();
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>

// Device kernel: every launched thread prints a greeting tagged with its
// thread index within the block.
__global__ void hellogpu (void)
{
    printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}

// Host entry point: greet from the CPU, launch 2 blocks of 10 threads,
// then reset the device via the HIP runtime.
int main(void)
{
    printf("Hello world from CPU!! \n");
    hellogpu <<<2, 10>>>();
    hipDeviceReset ();
    return 0;
}
.text .file "hello.hip" .globl _Z23__device_stub__hellogpuv # -- Begin function _Z23__device_stub__hellogpuv .p2align 4, 0x90 .type _Z23__device_stub__hellogpuv,@function _Z23__device_stub__hellogpuv: # @_Z23__device_stub__hellogpuv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z8hellogpuv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z23__device_stub__hellogpuv, .Lfunc_end0-_Z23__device_stub__hellogpuv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movabsq $4294967298, %rdi # imm = 0x100000002 leaq 8(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z8hellogpuv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8hellogpuv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z8hellogpuv,@object # @_Z8hellogpuv .section .rodata,"a",@progbits .globl _Z8hellogpuv .p2align 3, 0x0 _Z8hellogpuv: .quad _Z23__device_stub__hellogpuv .size _Z8hellogpuv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8hellogpuv" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello world from CPU!! 
" .size .Lstr, 24 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__hellogpuv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8hellogpuv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001260f1_00000000-6_hello.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z8hellogpuvv .type _Z26__device_stub__Z8hellogpuvv, @function _Z26__device_stub__Z8hellogpuvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z8hellogpuv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z26__device_stub__Z8hellogpuvv, .-_Z26__device_stub__Z8hellogpuvv .globl _Z8hellogpuv .type _Z8hellogpuv, @function _Z8hellogpuv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z8hellogpuvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z8hellogpuv, .-_Z8hellogpuv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello world from CPU!! 
\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $10, 20(%rsp) movl $1, 24(%rsp) movl $2, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z26__device_stub__Z8hellogpuvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z8hellogpuv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z8hellogpuv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" 
.align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hello.hip" .globl _Z23__device_stub__hellogpuv # -- Begin function _Z23__device_stub__hellogpuv .p2align 4, 0x90 .type _Z23__device_stub__hellogpuv,@function _Z23__device_stub__hellogpuv: # @_Z23__device_stub__hellogpuv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z8hellogpuv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z23__device_stub__hellogpuv, .Lfunc_end0-_Z23__device_stub__hellogpuv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movabsq $4294967298, %rdi # imm = 0x100000002 leaq 8(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z8hellogpuv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8hellogpuv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z8hellogpuv,@object # @_Z8hellogpuv .section .rodata,"a",@progbits .globl _Z8hellogpuv .p2align 3, 0x0 _Z8hellogpuv: .quad _Z23__device_stub__hellogpuv .size _Z8hellogpuv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8hellogpuv" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello world from CPU!! 
" .size .Lstr, 24 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__hellogpuv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8hellogpuv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include<iostream>
using namespace std;

// No-op device kernel; launched only to exercise the CUDA runtime path.
__global__ void kernel()
{
}

// Host entry point: launch one thread of the empty kernel, then print a
// greeting from the CPU.
int main(){
    kernel<<<1,1>>>();
    cout<<"Hello, World"<<endl;
    return 0;
}
code for sm_80 Function : _Z6kernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include<iostream>
using namespace std;

// No-op device kernel; launched only to exercise the CUDA runtime path.
__global__ void kernel()
{
}

// Host entry point: launch one thread of the empty kernel, then print a
// greeting from the CPU.
int main(){
    kernel<<<1,1>>>();
    cout<<"Hello, World"<<endl;
    return 0;
}
.file "tmpxft_000427c2_00000000-6_kernel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z24__device_stub__Z6kernelvv .type _Z24__device_stub__Z6kernelvv, @function _Z24__device_stub__Z6kernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z6kernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv .globl _Z6kernelv .type _Z6kernelv, @function _Z6kernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z24__device_stub__Z6kernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelv, .-_Z6kernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, World" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl 
$1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z24__device_stub__Z6kernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z6kernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include<iostream>
using namespace std;

// No-op device kernel; launched only to exercise the CUDA runtime path.
__global__ void kernel()
{
}

// Host entry point: launch one thread of the empty kernel, then print a
// greeting from the CPU.
int main(){
    kernel<<<1,1>>>();
    cout<<"Hello, World"<<endl;
    return 0;
}
#include <hip/hip_runtime.h>
#include<iostream>
using namespace std;

// No-op device kernel; launched only to exercise the HIP runtime path.
__global__ void kernel()
{
}

// Host entry point: launch one thread of the empty kernel, then print a
// greeting from the CPU.
int main(){
    kernel<<<1,1>>>();
    cout<<"Hello, World"<<endl;
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include<iostream>
using namespace std;

// No-op device kernel; launched only to exercise the HIP runtime path.
__global__ void kernel()
{
}

// Host entry point: launch one thread of the empty kernel, then print a
// greeting from the CPU.
int main(){
    kernel<<<1,1>>>();
    cout<<"Hello, World"<<endl;
    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelv .globl _Z6kernelv .p2align 8 .type _Z6kernelv,@function _Z6kernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelv, .Lfunc_end0-_Z6kernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] 
.group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z6kernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include<iostream> using namespace std; __global__ void kernel() { } int main(){ kernel<<<1,1>>>(); cout<<"Hello, World"<<endl; return 0; }
.text .file "kernel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv .p2align 4, 0x90 .type _Z21__device_stub__kernelv,@function _Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $64, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -16 movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB1_7 # %bb.3: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je 
.LBB1_5 # %bb.4: movzbl 67(%rbx), %eax jmp .LBB1_6 .LBB1_5: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB1_6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %eax, %eax addq $64, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_7: .cfi_def_cfa_offset 80 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelv,@object # @_Z6kernelv .section .rodata,"a",@progbits .globl _Z6kernelv .p2align 3, 0x0 _Z6kernelv: .quad _Z21__device_stub__kernelv .size _Z6kernelv, 8 
.type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Hello, World" .size .L.str, 13 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelv" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelv .globl _Z6kernelv .p2align 8 .type _Z6kernelv,@function _Z6kernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelv, .Lfunc_end0-_Z6kernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] 
.group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z6kernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000427c2_00000000-6_kernel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z24__device_stub__Z6kernelvv .type _Z24__device_stub__Z6kernelvv, @function _Z24__device_stub__Z6kernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z6kernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv .globl _Z6kernelv .type _Z6kernelv, @function _Z6kernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z24__device_stub__Z6kernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelv, .-_Z6kernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, World" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl 
$1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z24__device_stub__Z6kernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z6kernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 
0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv .p2align 4, 0x90 .type _Z21__device_stub__kernelv,@function _Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $64, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -16 movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $12, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB1_7 # %bb.3: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je 
.LBB1_5 # %bb.4: movzbl 67(%rbx), %eax jmp .LBB1_6 .LBB1_5: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB1_6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %eax, %eax addq $64, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_7: .cfi_def_cfa_offset 80 callq _ZSt16__throw_bad_castv .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelv,@object # @_Z6kernelv .section .rodata,"a",@progbits .globl _Z6kernelv .p2align 3, 0x0 _Z6kernelv: .quad _Z21__device_stub__kernelv .size _Z6kernelv, 8 
.type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Hello, World" .size .L.str, 13 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelv" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#ifdef _GLIBCXX_USE_INT128 #undef _GLIBCXX_USE_INT128 #endif #ifdef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_ATOMIC_BUILTINS #endif #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/adjacent_difference.h> #include <iostream> #include <iterator> #include <cstdlib> // This example compute the histogram [1] and cumulative // histogram of an array of integer values. // // [1] http://en.wikipedia.org/wiki/Histogram int main(void) { const size_t N = 30; // generate random data on the host thrust::host_vector<int> h_data(N); for(size_t i = 0; i < N; i++) h_data[i] = rand() % 10; // transfer data to device thrust::device_vector<int> d_data(h_data); // print the initial data std::cout << "initial data" << std::endl; thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; // sort data to bring equal elements together thrust::sort(d_data.begin(), d_data.end()); // print the sorted data std::cout << "sorted data" << std::endl; thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; // number of histogram bins is equal to the maximum value plus one const int num_bins = d_data.back() + 1; // allocate storage for the cumulative histogram and histogram thrust::device_vector<int> d_cumulative_histogram(num_bins); thrust::device_vector<int> d_histogram(num_bins); // find the end of each bin of values thrust::counting_iterator<int> search_begin(0); thrust::upper_bound(d_data.begin(), d_data.end(), search_begin, search_begin + num_bins, d_cumulative_histogram.begin()); // print the cumulative histogram std::cout << "cumulative histogram" << std::endl; thrust::copy(d_cumulative_histogram.begin(), d_cumulative_histogram.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; // compute the 
histogram by taking differences of the cumulative histogram thrust::adjacent_difference(d_cumulative_histogram.begin(), d_cumulative_histogram.end(), d_histogram.begin()); // print the histogram std::cout << "histogram" << std::endl; thrust::copy(d_histogram.begin(), d_histogram.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; std::cout << "TEST PASSED\n"; return 0; }
#ifdef _GLIBCXX_USE_INT128 #undef _GLIBCXX_USE_INT128 #endif #ifdef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_ATOMIC_BUILTINS #endif #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/adjacent_difference.h> #include <iostream> #include <iterator> #include <cstdlib> // This example compute the histogram [1] and cumulative // histogram of an array of integer values. // // [1] http://en.wikipedia.org/wiki/Histogram int main(void) { const size_t N = 30; // generate random data on the host thrust::host_vector<int> h_data(N); for(size_t i = 0; i < N; i++) h_data[i] = rand() % 10; // transfer data to device thrust::device_vector<int> d_data(h_data); // print the initial data std::cout << "initial data" << std::endl; thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; // sort data to bring equal elements together thrust::sort(d_data.begin(), d_data.end()); // print the sorted data std::cout << "sorted data" << std::endl; thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; // number of histogram bins is equal to the maximum value plus one const int num_bins = d_data.back() + 1; // allocate storage for the cumulative histogram and histogram thrust::device_vector<int> d_cumulative_histogram(num_bins); thrust::device_vector<int> d_histogram(num_bins); // find the end of each bin of values thrust::counting_iterator<int> search_begin(0); thrust::upper_bound(d_data.begin(), d_data.end(), search_begin, search_begin + num_bins, d_cumulative_histogram.begin()); // print the cumulative histogram std::cout << "cumulative histogram" << std::endl; thrust::copy(d_cumulative_histogram.begin(), d_cumulative_histogram.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << 
std::endl; // compute the histogram by taking differences of the cumulative histogram thrust::adjacent_difference(d_cumulative_histogram.begin(), d_cumulative_histogram.end(), d_histogram.begin()); // print the histogram std::cout << "histogram" << std::endl; thrust::copy(d_histogram.begin(), d_histogram.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl; std::cout << "TEST PASSED\n"; return 0; }
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; cudaMalloc(&d_a, length * sizeof(char)); cudaMalloc(&d_b, length * sizeof(char)); cudaMalloc(&d_c, length * sizeof(char)); cudaMalloc(&d_res, length * sizeof(char)); cudaMemcpy(d_a, a, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, length * sizeof(char), cudaMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); cudaMemcpy(res, d_res, length * sizeof(char), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
code for sm_80 Function : _Z7vectAddPcS_S_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */ /* 0x001fca00078e0203 */ /*0050*/ SHF.R.S32.HI R0, RZ, 0x1f, R8 ; /* 0x0000001fff007819 */ /* 0x000fe40000011408 */ /*0060*/ IADD3 R4, P1, R8.reuse, c[0x0][0x168], RZ ; /* 0x00005a0008047a10 */ /* 0x040fe40007f3e0ff */ /*0070*/ IADD3 R2, P0, R8.reuse, c[0x0][0x160], RZ ; /* 0x0000580008027a10 */ /* 0x040fe40007f1e0ff */ /*0080*/ IADD3 R6, P2, R8, c[0x0][0x170], RZ ; /* 0x00005c0008067a10 */ /* 0x000fe40007f5e0ff */ /*0090*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*00a0*/ IADD3.X R3, R0, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590000037a10 */ /* 0x000fc400007fe4ff */ /*00b0*/ IADD3.X R7, R0, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */ /* 0x000fe400017fe4ff */ /*00c0*/ LDG.E.U8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1100 */ /*00d0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1100 */ /*00e0*/ LDG.E.U8 R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1100 */ /*00f0*/ IADD3 R8, P0, R8, c[0x0][0x178], RZ ; /* 0x00005e0008087a10 */ /* 0x000fc80007f1e0ff */ /*0100*/ IADD3.X R9, R0, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f0000097a10 */ /* 0x000fe400007fe4ff */ /*0110*/ IADD3 R11, R6, R5, R2 ; /* 0x00000005060b7210 */ /* 0x004fca0007ffe002 */ /*0120*/ STG.E.U8 [R8.64], R11 ; /* 0x0000000b08007986 */ /* 0x000fe2000c101104 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; 
/* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; cudaMalloc(&d_a, length * sizeof(char)); cudaMalloc(&d_b, length * sizeof(char)); cudaMalloc(&d_c, length * sizeof(char)); cudaMalloc(&d_res, length * sizeof(char)); cudaMemcpy(d_a, a, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, length * sizeof(char), cudaMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); cudaMemcpy(res, d_res, length * sizeof(char), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
.file "tmpxft_001b388a_00000000-6_hello_cpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i .type _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i, @function _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i: .LFB2083: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7vectAddPcS_S_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i, .-_Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i .globl _Z7vectAddPcS_S_S_i .type _Z7vectAddPcS_S_S_i, @function _Z7vectAddPcS_S_S_i: .LFB2084: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z7vectAddPcS_S_S_i, .-_Z7vectAddPcS_S_S_i .globl _Z14compute_stringPcS_S_S_i .type _Z14compute_stringPcS_S_S_i, @function _Z14compute_stringPcS_S_S_i: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movq %rdi, %r12 movq %rsi, %r15 movq %rdx, %r14 movq %rcx, %r13 movl %r8d, %ebp movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %r8d, %rbx movq %rsp, %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r15, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $30, 32(%rsp) movl $1, 36(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: movl $2, %ecx movq %rbx, %rdx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 
32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movl %ebp, %r8d movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z14compute_stringPcS_S_S_i, .-_Z14compute_stringPcS_S_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%s\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $112, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movabsq $5778681616646161960, %rax movabsq $5066356238548616784, %rdx movq %rax, (%rsp) movq %rdx, 8(%rsp) movabsq $5044086773655815759, %rax movabsq $33115038679120, %rdx movq %rax, 14(%rsp) movq %rdx, 22(%rsp) movabsq $723401728380766730, %rax movabsq $723401728380766730, %rdx movq %rax, 32(%rsp) movq %rdx, 40(%rsp) movabsq $723401728380766730, %rax movabsq $11038234380810, %rdx movq %rax, 46(%rsp) movq %rdx, 54(%rsp) movabsq $1520833579089401110, %rax movabsq $2095889151136373784, %rdx movq %rax, 64(%rsp) movq %rdx, 72(%rsp) movabsq $1807659874479643926, %rax movabsq $27608470984212, %rdx movq %rax, 78(%rsp) movq %rdx, 86(%rsp) movl $30, %edi call malloc@PLT movq %rax, %rbx leaq 64(%rsp), %rcx leaq 32(%rsp), %rdx movq %rsp, %rsi movl $30, %r8d movq %rax, %rdi call _Z14compute_stringPcS_S_S_i movq %rbx, %rdx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 104(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $112, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z7vectAddPcS_S_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z7vectAddPcS_S_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; cudaMalloc(&d_a, length * sizeof(char)); cudaMalloc(&d_b, length * sizeof(char)); cudaMalloc(&d_c, length * sizeof(char)); cudaMalloc(&d_res, length * sizeof(char)); cudaMemcpy(d_a, a, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, length * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, length * sizeof(char), cudaMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); cudaMemcpy(res, d_res, length * sizeof(char), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
#include <hip/hip_runtime.h> #include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; hipMalloc(&d_a, length * sizeof(char)); hipMalloc(&d_b, length * sizeof(char)); hipMalloc(&d_c, length * sizeof(char)); hipMalloc(&d_res, length * sizeof(char)); hipMemcpy(d_a, a, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_b, b, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_c, c, length * sizeof(char), hipMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); hipMemcpy(res, d_res, length * sizeof(char), hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; hipMalloc(&d_a, length * sizeof(char)); hipMalloc(&d_b, length * sizeof(char)); hipMalloc(&d_c, length * sizeof(char)); hipMalloc(&d_res, length * sizeof(char)); hipMemcpy(d_a, a, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_b, b, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_c, c, length * sizeof(char), hipMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); hipMemcpy(res, d_res, length * sizeof(char), hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7vectAddPcS_S_S_i .globl _Z7vectAddPcS_S_S_i .p2align 8 .type _Z7vectAddPcS_S_S_i,@function _Z7vectAddPcS_S_S_i: s_clause 0x1 s_load_b32 s8, s[0:1], 0x34 s_load_b256 s[0:7], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s8, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] v_ashrrev_i32_e32 v8, 31, v1 v_add_co_u32 v2, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v8, vcc_lo v_add_co_u32 v4, vcc_lo, s2, v1 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v8, vcc_lo v_add_co_u32 v6, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v8, vcc_lo global_load_u8 v0, v[2:3], off global_load_u8 v2, v[4:5], off global_load_u8 v3, v[6:7], off s_waitcnt vmcnt(1) v_add_nc_u16 v0, v2, v0 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_add_nc_u16 v2, v0, v3 v_add_co_u32 v0, vcc_lo, s6, v1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v8, vcc_lo global_store_b8 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7vectAddPcS_S_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7vectAddPcS_S_S_i, .Lfunc_end0-_Z7vectAddPcS_S_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: 
hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7vectAddPcS_S_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7vectAddPcS_S_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <string.h> #include <stdio.h> #include <stdlib.h> __global__ void vectAdd(char *a, char *b, char *c, char *res, int len) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; res[i] = a[i] + b[i] + c[i]; } /* Function computing the final string to print */ void compute_string(char *res, char *a, char *b, char *c, int length) { char *d_a, *d_b, *d_c, *d_res; hipMalloc(&d_a, length * sizeof(char)); hipMalloc(&d_b, length * sizeof(char)); hipMalloc(&d_c, length * sizeof(char)); hipMalloc(&d_res, length * sizeof(char)); hipMemcpy(d_a, a, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_b, b, length * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_c, c, length * sizeof(char), hipMemcpyHostToDevice); dim3 dimBlock(30); dim3 dimGrid(1); vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, d_res, length); hipMemcpy(res, d_res, length * sizeof(char), hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_res); } int main() { char *res; char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79, 70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0}; char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0}; char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22, 29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0}; res = (char *)malloc(30 * sizeof(char)); /* This function call should be programmed in CUDA */ /* -> need to allocate and transfer data to/from the device */ compute_string(res, a, b, c, 30); printf("%s\n", res); return 0; }
.text .file "hello_cpu.hip" .globl _Z22__device_stub__vectAddPcS_S_S_i # -- Begin function _Z22__device_stub__vectAddPcS_S_S_i .p2align 4, 0x90 .type _Z22__device_stub__vectAddPcS_S_S_i,@function _Z22__device_stub__vectAddPcS_S_S_i: # @_Z22__device_stub__vectAddPcS_S_S_i .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7vectAddPcS_S_S_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z22__device_stub__vectAddPcS_S_S_i, .Lfunc_end0-_Z22__device_stub__vectAddPcS_S_S_i .cfi_endproc # -- End function .globl _Z14compute_stringPcS_S_S_i # -- Begin function _Z14compute_stringPcS_S_S_i .p2align 4, 0x90 .type _Z14compute_stringPcS_S_S_i,@function _Z14compute_stringPcS_S_S_i: # @_Z14compute_stringPcS_S_S_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %r8d, %ebp movq %rcx, %r15 movq %rdx, %r12 movq %rsi, %r13 movq %rdi, %r14 movslq %r8d, %rbx leaq 32(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq 
%rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 32(%rsp), %rdi movq %r13, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq %r12, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r15, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movabsq $4294967297, %rdi # imm = 0x100000001 leaq 29(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movq %rsi, 96(%rsp) movl %ebp, 44(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 44(%rsp), %rax movq %rax, 160(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z7vectAddPcS_S_S_i, %edi pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi movq %r14, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z14compute_stringPcS_S_S_i, .Lfunc_end1-_Z14compute_stringPcS_S_S_i .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI2_0: .byte 40 # 0x28 .byte 
70 # 0x46 .byte 70 # 0x46 .byte 70 # 0x46 .byte 80 # 0x50 .byte 0 # 0x0 .byte 50 # 0x32 .byte 80 # 0x50 .byte 80 # 0x50 .byte 70 # 0x46 .byte 70 # 0x46 .byte 0 # 0x0 .byte 40 # 0x28 .byte 80 # 0x50 .byte 79 # 0x4f .byte 70 # 0x46 .LCPI2_1: .zero 16,10 .LCPI2_2: .byte 22 # 0x16 .byte 21 # 0x15 .byte 28 # 0x1c .byte 28 # 0x1c .byte 21 # 0x15 .byte 22 # 0x16 .byte 27 # 0x1b .byte 21 # 0x15 .byte 24 # 0x18 .byte 28 # 0x1c .byte 20 # 0x14 .byte 22 # 0x16 .byte 20 # 0x14 .byte 24 # 0x18 .byte 22 # 0x16 .byte 29 # 0x1d .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $96, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -16 movaps .LCPI2_0(%rip), %xmm0 # xmm0 = [40,70,70,70,80,0,50,80,80,70,70,0,40,80,79,70] movaps %xmm0, 32(%rsp) movabsq $22594964792944640, %rax # imm = 0x50460032322800 movq %rax, 48(%rsp) movl $505295390, 56(%rsp) # imm = 0x1E1E321E movw $0, 60(%rsp) movaps .LCPI2_1(%rip), %xmm0 # xmm0 = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10] movups %xmm0, 76(%rsp) movaps %xmm0, 64(%rsp) movw $0, 92(%rsp) movaps .LCPI2_2(%rip), %xmm0 # xmm0 = [22,21,28,28,21,22,27,21,24,28,20,22,20,24,22,29] movaps %xmm0, (%rsp) movabsq $1590924151069349142, %rax # imm = 0x1614191619141516 movq %rax, 16(%rsp) movl $421271835, 24(%rsp) # imm = 0x191C191B movw $0, 28(%rsp) movl $30, %edi callq malloc movq %rax, %rbx leaq 32(%rsp), %rsi leaq 64(%rsp), %rdx movq %rsp, %rcx movq %rax, %rdi movl $30, %r8d callq _Z14compute_stringPcS_S_S_i movq %rbx, %rdi callq puts@PLT xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, 
%edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7vectAddPcS_S_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z7vectAddPcS_S_S_i,@object # @_Z7vectAddPcS_S_S_i .section .rodata,"a",@progbits .globl _Z7vectAddPcS_S_S_i .p2align 3, 0x0 _Z7vectAddPcS_S_S_i: .quad _Z22__device_stub__vectAddPcS_S_S_i .size _Z7vectAddPcS_S_S_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7vectAddPcS_S_S_i" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git 
(https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__vectAddPcS_S_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7vectAddPcS_S_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z7vectAddPcS_S_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0040*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */ /* 0x001fca00078e0203 */ /*0050*/ SHF.R.S32.HI R0, RZ, 0x1f, R8 ; /* 0x0000001fff007819 */ /* 0x000fe40000011408 */ /*0060*/ IADD3 R4, P1, R8.reuse, c[0x0][0x168], RZ ; /* 0x00005a0008047a10 */ /* 0x040fe40007f3e0ff */ /*0070*/ IADD3 R2, P0, R8.reuse, c[0x0][0x160], RZ ; /* 0x0000580008027a10 */ /* 0x040fe40007f1e0ff */ /*0080*/ IADD3 R6, P2, R8, c[0x0][0x170], RZ ; /* 0x00005c0008067a10 */ /* 0x000fe40007f5e0ff */ /*0090*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */ /* 0x040fe40000ffe4ff */ /*00a0*/ IADD3.X R3, R0, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590000037a10 */ /* 0x000fc400007fe4ff */ /*00b0*/ IADD3.X R7, R0, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */ /* 0x000fe400017fe4ff */ /*00c0*/ LDG.E.U8 R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1100 */ /*00d0*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1100 */ /*00e0*/ LDG.E.U8 R6, [R6.64] ; /* 0x0000000406067981 */ /* 0x000ea2000c1e1100 */ /*00f0*/ IADD3 R8, P0, R8, c[0x0][0x178], RZ ; /* 0x00005e0008087a10 */ /* 0x000fc80007f1e0ff */ /*0100*/ IADD3.X R9, R0, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f0000097a10 */ /* 0x000fe400007fe4ff */ /*0110*/ IADD3 R11, R6, R5, R2 ; /* 0x00000005060b7210 */ /* 0x004fca0007ffe002 */ /*0120*/ STG.E.U8 [R8.64], R11 ; /* 0x0000000b08007986 */ /* 0x000fe2000c101104 */ /*0130*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0140*/ BRA 0x140; 
/* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7vectAddPcS_S_S_i .globl _Z7vectAddPcS_S_S_i .p2align 8 .type _Z7vectAddPcS_S_S_i,@function _Z7vectAddPcS_S_S_i: s_clause 0x1 s_load_b32 s8, s[0:1], 0x34 s_load_b256 s[0:7], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s8, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] v_ashrrev_i32_e32 v8, 31, v1 v_add_co_u32 v2, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v8, vcc_lo v_add_co_u32 v4, vcc_lo, s2, v1 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v8, vcc_lo v_add_co_u32 v6, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v8, vcc_lo global_load_u8 v0, v[2:3], off global_load_u8 v2, v[4:5], off global_load_u8 v3, v[6:7], off s_waitcnt vmcnt(1) v_add_nc_u16 v0, v2, v0 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_add_nc_u16 v2, v0, v3 v_add_co_u32 v0, vcc_lo, s6, v1 v_add_co_ci_u32_e32 v1, vcc_lo, s7, v8, vcc_lo global_store_b8 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7vectAddPcS_S_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7vectAddPcS_S_S_i, .Lfunc_end0-_Z7vectAddPcS_S_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: 
hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7vectAddPcS_S_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7vectAddPcS_S_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b388a_00000000-6_hello_cpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i .type _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i, @function _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i: .LFB2083: .cfi_startproc endbr64 subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movq %fs:40, %rax movq %rax, 152(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 152(%rsp), %rax subq %fs:40, %rax jne .L8 addq $168, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 184 pushq 56(%rsp) .cfi_def_cfa_offset 192 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z7vectAddPcS_S_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i, .-_Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i .globl _Z7vectAddPcS_S_S_i .type _Z7vectAddPcS_S_S_i, @function _Z7vectAddPcS_S_S_i: .LFB2084: .cfi_startproc endbr64 subq 
$8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z7vectAddPcS_S_S_i, .-_Z7vectAddPcS_S_S_i .globl _Z14compute_stringPcS_S_S_i .type _Z14compute_stringPcS_S_S_i, @function _Z14compute_stringPcS_S_S_i: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movq %rdi, %r12 movq %rsi, %r15 movq %rdx, %r14 movq %rcx, %r13 movl %r8d, %ebp movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movslq %r8d, %rbx movq %rsp, %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 16(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 24(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r15, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $30, 32(%rsp) movl $1, 36(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: movl $2, %ecx movq %rbx, %rdx movq 24(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 
32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movl %ebp, %r8d movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z33__device_stub__Z7vectAddPcS_S_S_iPcS_S_S_i jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z14compute_stringPcS_S_S_i, .-_Z14compute_stringPcS_S_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%s\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $112, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movabsq $5778681616646161960, %rax movabsq $5066356238548616784, %rdx movq %rax, (%rsp) movq %rdx, 8(%rsp) movabsq $5044086773655815759, %rax movabsq $33115038679120, %rdx movq %rax, 14(%rsp) movq %rdx, 22(%rsp) movabsq $723401728380766730, %rax movabsq $723401728380766730, %rdx movq %rax, 32(%rsp) movq %rdx, 40(%rsp) movabsq $723401728380766730, %rax movabsq $11038234380810, %rdx movq %rax, 46(%rsp) movq %rdx, 54(%rsp) movabsq $1520833579089401110, %rax movabsq $2095889151136373784, %rdx movq %rax, 64(%rsp) movq %rdx, 72(%rsp) movabsq $1807659874479643926, %rax movabsq $27608470984212, %rdx movq %rax, 78(%rsp) movq %rdx, 86(%rsp) movl $30, %edi call malloc@PLT movq %rax, %rbx leaq 64(%rsp), %rcx leaq 32(%rsp), %rdx movq %rsp, %rsi movl $30, %r8d movq %rax, %rdi call _Z14compute_stringPcS_S_S_i movq %rbx, %rdx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 104(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $112, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z7vectAddPcS_S_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z7vectAddPcS_S_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hello_cpu.hip" .globl _Z22__device_stub__vectAddPcS_S_S_i # -- Begin function _Z22__device_stub__vectAddPcS_S_S_i .p2align 4, 0x90 .type _Z22__device_stub__vectAddPcS_S_S_i,@function _Z22__device_stub__vectAddPcS_S_S_i: # @_Z22__device_stub__vectAddPcS_S_S_i .cfi_startproc # %bb.0: subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z7vectAddPcS_S_S_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $152, %rsp .cfi_adjust_cfa_offset -152 retq .Lfunc_end0: .size _Z22__device_stub__vectAddPcS_S_S_i, .Lfunc_end0-_Z22__device_stub__vectAddPcS_S_S_i .cfi_endproc # -- End function .globl _Z14compute_stringPcS_S_S_i # -- Begin function _Z14compute_stringPcS_S_S_i .p2align 4, 0x90 .type _Z14compute_stringPcS_S_S_i,@function _Z14compute_stringPcS_S_S_i: # @_Z14compute_stringPcS_S_S_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %r8d, %ebp movq %rcx, %r15 movq %rdx, %r12 movq %rsi, %r13 movq %rdi, %r14 movslq %r8d, %rbx leaq 32(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq 
%rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 32(%rsp), %rdi movq %r13, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq %r12, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r15, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movabsq $4294967297, %rdi # imm = 0x100000001 leaq 29(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movq %rsi, 96(%rsp) movl %ebp, 44(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 44(%rsp), %rax movq %rax, 160(%rsp) leaq 80(%rsp), %rdi leaq 64(%rsp), %rsi leaq 56(%rsp), %rdx leaq 48(%rsp), %rcx callq __hipPopCallConfiguration movq 80(%rsp), %rsi movl 88(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z7vectAddPcS_S_S_i, %edi pushq 48(%rsp) .cfi_adjust_cfa_offset 8 pushq 64(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi movq %r14, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z14compute_stringPcS_S_S_i, .Lfunc_end1-_Z14compute_stringPcS_S_S_i .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI2_0: .byte 40 # 0x28 .byte 
70 # 0x46 .byte 70 # 0x46 .byte 70 # 0x46 .byte 80 # 0x50 .byte 0 # 0x0 .byte 50 # 0x32 .byte 80 # 0x50 .byte 80 # 0x50 .byte 70 # 0x46 .byte 70 # 0x46 .byte 0 # 0x0 .byte 40 # 0x28 .byte 80 # 0x50 .byte 79 # 0x4f .byte 70 # 0x46 .LCPI2_1: .zero 16,10 .LCPI2_2: .byte 22 # 0x16 .byte 21 # 0x15 .byte 28 # 0x1c .byte 28 # 0x1c .byte 21 # 0x15 .byte 22 # 0x16 .byte 27 # 0x1b .byte 21 # 0x15 .byte 24 # 0x18 .byte 28 # 0x1c .byte 20 # 0x14 .byte 22 # 0x16 .byte 20 # 0x14 .byte 24 # 0x18 .byte 22 # 0x16 .byte 29 # 0x1d .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $96, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -16 movaps .LCPI2_0(%rip), %xmm0 # xmm0 = [40,70,70,70,80,0,50,80,80,70,70,0,40,80,79,70] movaps %xmm0, 32(%rsp) movabsq $22594964792944640, %rax # imm = 0x50460032322800 movq %rax, 48(%rsp) movl $505295390, 56(%rsp) # imm = 0x1E1E321E movw $0, 60(%rsp) movaps .LCPI2_1(%rip), %xmm0 # xmm0 = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10] movups %xmm0, 76(%rsp) movaps %xmm0, 64(%rsp) movw $0, 92(%rsp) movaps .LCPI2_2(%rip), %xmm0 # xmm0 = [22,21,28,28,21,22,27,21,24,28,20,22,20,24,22,29] movaps %xmm0, (%rsp) movabsq $1590924151069349142, %rax # imm = 0x1614191619141516 movq %rax, 16(%rsp) movl $421271835, 24(%rsp) # imm = 0x191C191B movw $0, 28(%rsp) movl $30, %edi callq malloc movq %rax, %rbx leaq 32(%rsp), %rsi leaq 64(%rsp), %rdx movq %rsp, %rcx movq %rax, %rdi movl $30, %r8d callq _Z14compute_stringPcS_S_S_i movq %rbx, %rdi callq puts@PLT xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, 
%edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7vectAddPcS_S_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z7vectAddPcS_S_S_i,@object # @_Z7vectAddPcS_S_S_i .section .rodata,"a",@progbits .globl _Z7vectAddPcS_S_S_i .p2align 3, 0x0 _Z7vectAddPcS_S_S_i: .quad _Z22__device_stub__vectAddPcS_S_S_i .size _Z7vectAddPcS_S_S_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7vectAddPcS_S_S_i" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git 
(https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__vectAddPcS_S_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7vectAddPcS_S_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) { centerCoefficient -= 
dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if (rightCoeff) { 
thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / 
sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = true; } } } 
__global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * cellPhysicalSize; float 
newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) { centerCoefficient -= 
dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if (rightCoeff) { 
thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / 
sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = true; } } } 
__global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * cellPhysicalSize; float 
newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
	# -------------------------------------------------------------------
	# Host-side CUDA fatbinary registration boilerplate.
	# The tmpxft_*.cudafe1.cpp .file name indicates this was emitted by
	# the nvcc host-compilation path through GCC (AT&T/GAS syntax,
	# System V AMD64 ABI).  It registers the embedded device code blob
	# ("fat binary") with the CUDA runtime at program startup and
	# unregisters it at exit.
	# -------------------------------------------------------------------
	.file	"tmpxft_001b1c6a_00000000-6_Fluid_2D_kernels.cudafe1.cpp"
	.text
#APP
#NO_APP

	# void __cudaUnregisterBinaryUtil(void)
	# atexit() callback: passes the saved fatbin handle to
	# __cudaUnregisterFatBinary().  Clobbers: rdi, rax (per ABI).
	# The sub/add of 8 keeps rsp 16-byte aligned at the call site.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64					# CET/IBT landing pad (indirectly callable via atexit)
	subq	$8, %rsp			# realign stack for the call below
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv

	# void __sti____cudaRegisterAll(void)
	# Static initializer (run via .init_array before main):
	#   handle = __cudaRegisterFatBinary(&__fatDeviceText);
	#   __cudaFatCubinHandle = handle;
	#   __cudaRegisterFatBinaryEnd(handle);
	#   atexit(__cudaUnregisterBinaryUtil);
	# NOTE(review): no __cudaRegisterFunction calls appear here,
	# consistent with all kernels in this translation unit being
	# commented out.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# realign stack (entry rsp % 16 == 8)
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi	# arg0 = &fatbin wrapper descriptor
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi			# returned handle -> arg0 for next call
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# stash handle for unregister at exit
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT			# ensure cleanup runs at process exit
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv

	# Constructor table entry: run the registration initializer at load time.
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv

	# Fatbin wrapper descriptor passed to __cudaRegisterFatBinary:
	# { magic, version, pointer to fatbin data, filename/unused }.
	# 1180844977 == 0x466243B1 (the wrapper magic value emitted here).
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
	.long	1180844977			# magic 0x466243B1
	.long	1				# version
	.quad	fatbinData			# device code blob (defined elsewhere)
	.quad	0

	# 8-byte zero-initialized slot holding the runtime's fatbin handle.
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8

	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits	# mark stack non-executable

	# GNU property note advertising CET support (IBT | SHSTK = 0x3),
	# matching the endbr64 landing pads above.
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f				# name size
	.long	4f - 1f				# descriptor size
	.long	5				# NT_GNU_PROPERTY_TYPE_0
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002			# GNU_PROPERTY_X86_FEATURE_1_AND
	.long	3f - 2f
2:
	.long	0x3				# IBT | SHSTK
3:
	.align 8
4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) { centerCoefficient -= 
dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if (rightCoeff) { 
thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / 
sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = true; } } } 
__global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * cellPhysicalSize; float 
newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
#include <hip/hip_runtime.h> /* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) 
{ centerCoefficient -= dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if 
(rightCoeff) { thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x 
= index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = 
true; } } } __global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * 
cellPhysicalSize; float newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> /* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) 
{ centerCoefficient -= dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if 
(rightCoeff) { thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x 
= index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = 
true; } } } __global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * 
cellPhysicalSize; float newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> /* #include "Fluid_2D_kernels.cuh" __global__ void applyGravityImpl(Cell2D* cells, int sizeX, int sizeY, float timeStep, float gravitationalAcceleration) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content == CONTENT_FLUID) { get2D(cells, x, y).newVelocity.y -= gravitationalAcceleration * timeStep; if (get2D(cells, x, y + 1).content == CONTENT_AIR) get2D(cells, x, y + 1).newVelocity.y -= gravitationalAcceleration * timeStep; } else if (get2D(cells, x, y).content == CONTENT_AIR) { //if( x-1 >0 && grid.get2D(cells,x-1,y).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.x = 0; //if( y-1 >0 && grid.get2D(cells,x,y-1).content == CONTENT_AIR) grid.get2D(cells,x,y).newVelocity.y = 0; } } __global__ void fixBoundaryX(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeY) return; int y = index; get2D(cells, 0, y).newVelocity.x = 0; get2D(cells, sizeX, y).newVelocity.x = 0; get2D(cells, 0, y).hasVelocityX = true; get2D(cells, sizeX, y).hasVelocityX = true; get2D(cells, sizeX, y).content = CONTENT_SOLID; } __global__ void fixBoundaryY(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index > sizeX) return; int x = index; get2D(cells, x, 0).newVelocity.y = 0; //get2D(cells,x,sizeY).newVelocity.y = 0; get2D(cells, x, 0).hasVelocityY = true; get2D(cells, x, sizeY).hasVelocityY = true; get2D(cells, x, sizeY).content = CONTENT_AIR; } __device__ __host__ float getNeibourCoefficient(int x, int y, float dt_div_rho_div_dx, float u, float& centerCoefficient, float& RHS, Cell2D* cells, int sizeX, int sizeY) { if (x >= 0 && x < sizeX && y >= 0 && y < sizeY && get2D(cells, x, y).content == CONTENT_FLUID) { return dt_div_rho_div_dx * -1; } else { if (x < 0 || y < 0 || x >= sizeX || y >= sizeY || get2D(cells, x, y).content == CONTENT_SOLID) 
{ centerCoefficient -= dt_div_rho_div_dx; //RHS += u; return 0; } else if (get2D(cells, x, y).content == CONTENT_AIR) { return 0; } } } __global__ void constructPressureEquations(Cell2D* cells, int sizeX, int sizeY, PressureEquation2D* equations, float dt_div_rho_div_dx, bool* hasNonZeroRHS) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; get2D(cells, x, y).pressure = 0; if (get2D(cells, x, y).content != CONTENT_FLUID) return; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); PressureEquation2D thisEquation; float RHS = (thisCell.newVelocity.y - upCell.newVelocity.y + thisCell.newVelocity.x - rightCell.newVelocity.x); float centerCoeff = dt_div_rho_div_dx * 4; float leftCoeff = getNeibourCoefficient(x - 1, y, dt_div_rho_div_dx, thisCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float rightCoeff = getNeibourCoefficient(x + 1, y, dt_div_rho_div_dx, rightCell.newVelocity.x, centerCoeff, RHS, cells, sizeX, sizeY); float downCoeff = getNeibourCoefficient(x, y - 1, dt_div_rho_div_dx, thisCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); float upCoeff = getNeibourCoefficient(x, y + 1, dt_div_rho_div_dx, upCell.newVelocity.y, centerCoeff, RHS, cells, sizeX, sizeY); int nnz = 0; if (downCoeff) { Cell2D& downCell = get2D(cells, x, y - 1); thisEquation.termsIndex[thisEquation.termCount] = downCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = downCoeff; ++thisEquation.termCount; ++nnz; } if (leftCoeff) { Cell2D& leftCell = get2D(cells, x - 1, y); thisEquation.termsIndex[thisEquation.termCount] = leftCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = leftCoeff; ++thisEquation.termCount; ++nnz; } thisEquation.termsIndex[thisEquation.termCount] = thisCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = centerCoeff; ++thisEquation.termCount; if 
(rightCoeff) { thisEquation.termsIndex[thisEquation.termCount] = rightCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = rightCoeff; ++thisEquation.termCount; ++nnz; } if (upCoeff) { thisEquation.termsIndex[thisEquation.termCount] = upCell.fluidIndex; thisEquation.termsCoeff[thisEquation.termCount] = upCoeff; ++thisEquation.termCount; ++nnz; } ++nnz; thisEquation.RHS = RHS; if (RHS != 0) { *hasNonZeroRHS = true; } thisEquation.x = x; thisEquation.y = y; equations[thisCell.fluidIndex] = thisEquation; } __global__ void setPressure(Cell2D* cells, int sizeX, int sizeY, double* pressureResult) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; if (get2D(cells, x, y).content != CONTENT_FLUID) return; get2D(cells, x, y).pressure = pressureResult[get2D(cells, x, y).fluidIndex]; } __global__ void updateVelocityWithPressureImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.hasVelocityX = false; thisCell.hasVelocityY = false; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (thisCell.content == CONTENT_FLUID || leftCell.content == CONTENT_FLUID) { float uX = thisCell.newVelocity.x - dt_div_rho_div_dx * (thisCell.pressure - leftCell.pressure); thisCell.newVelocity.x = uX; thisCell.hasVelocityX = true; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (thisCell.content == CONTENT_FLUID || downCell.content == CONTENT_FLUID) { float uY = thisCell.newVelocity.y - dt_div_rho_div_dx * (thisCell.pressure - downCell.pressure); thisCell.newVelocity.y = uY; thisCell.hasVelocityY = true; } } } __global__ void extrapolateVelocityByOne(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x 
= index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); const float epsilon = 1e-6; if (!thisCell.hasVelocityX) { float sumNeighborX = 0; int neighborXCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityX && leftCell.newVelocity.x > epsilon) { sumNeighborX += leftCell.newVelocity.x; neighborXCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityX && downCell.newVelocity.y > epsilon) { sumNeighborX += downCell.newVelocity.x; neighborXCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityX && rightCell.newVelocity.x < -epsilon) { sumNeighborX += rightCell.newVelocity.x; neighborXCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityX && upCell.newVelocity.y < -epsilon) { sumNeighborX += upCell.newVelocity.x; neighborXCount++; } } if (neighborXCount > 0) { thisCell.newVelocity.x = sumNeighborX / (float)neighborXCount; thisCell.hasVelocityX = true; } } if (!thisCell.hasVelocityY) { float sumNeighborY = 0; int neighborYCount = 0; if (x > 0) { Cell2D& leftCell = get2D(cells, x - 1, y); if (leftCell.hasVelocityY && leftCell.newVelocity.x > epsilon) { sumNeighborY += leftCell.newVelocity.y; neighborYCount++; } } if (y > 0) { Cell2D& downCell = get2D(cells, x, y - 1); if (downCell.hasVelocityY && downCell.newVelocity.y > epsilon) { sumNeighborY += downCell.newVelocity.y; neighborYCount++; } } if (x < sizeX - 1) { Cell2D& rightCell = get2D(cells, x + 1, y); if (rightCell.hasVelocityY && rightCell.newVelocity.x < -epsilon) { sumNeighborY += rightCell.newVelocity.y; neighborYCount++; } } if (y < sizeY - 1) { Cell2D& upCell = get2D(cells, x, y + 1); if (upCell.hasVelocityY && upCell.newVelocity.y < -epsilon) { sumNeighborY += upCell.newVelocity.y; neighborYCount++; } } if (neighborYCount > 0) { thisCell.newVelocity.y = sumNeighborY / (float)neighborYCount; thisCell.hasVelocityY = 
true; } } } __global__ void drawCellImpl(Cell2D* cells, int sizeX, int sizeY, unsigned char* image) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); unsigned char* base = image + 4 * (sizeX * y + x); int cellID = x * (sizeY + 1) + y; if (thisCell.content == CONTENT_FLUID) { float fluid1percentage = thisCell.fluid1Count / (thisCell.fluid1Count + thisCell.fluid0Count); base[0] = 255 * fluid1percentage; base[1] = 0; base[2] = 255 * (1 - fluid1percentage); thisCell.fluid1Count = thisCell.fluid0Count = 0; } else { base[0] = 255; base[1] = 255; base[2] = 255; } base[3] = 255; } __global__ void computeDivergenceImpl(Cell2D* cells, int sizeX, int sizeY, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& upCell = get2D(cells, x, y + 1); Cell2D& rightCell = get2D(cells, x + 1, y); float div = (upCell.newVelocity.y - thisCell.newVelocity.y + rightCell.newVelocity.x - thisCell.newVelocity.x) / cellPhysicalSize; thisCell.divergence = div; } __global__ void resetPressureImpl(Cell2D* cells, int sizeX, int sizeY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); thisCell.pressure = 0; } __global__ void jacobiImpl(Cell2D* cells, int sizeX, int sizeY, float dt_div_rho_div_dx, float cellPhysicalSize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= sizeX * sizeY) return; int x = index / sizeY; int y = index - x * sizeY; Cell2D& thisCell = get2D(cells, x, y); Cell2D& rightCell = get2D(cells, x + 1, y); Cell2D& upCell = get2D(cells, x, y + 1); if (thisCell.content == CONTENT_AIR) { thisCell.pressure = 0; return; } float RHS = -thisCell.divergence * 
cellPhysicalSize; float newPressure = 0; float centerCoeff = dt_div_rho_div_dx * 4; if (x > 0) newPressure += get2D(cells, x - 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (x < sizeX - 1) newPressure += get2D(cells, x + 1, y).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y > 0) newPressure += get2D(cells, x, y - 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } if (y < sizeY - 1) newPressure += get2D(cells, x, y + 1).pressure * dt_div_rho_div_dx; else { centerCoeff -= dt_div_rho_div_dx; } newPressure += RHS; newPressure /= centerCoeff; thisCell.pressure = newPressure; } */
.text .file "Fluid_2D_kernels.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b1c6a_00000000-6_Fluid_2D_kernels.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Fluid_2D_kernels.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include "cs_cuda.h" #include "cs_helper.h" #include "cs_dbg.h" #include "cs_interpolate.h" // #define CUDA_DBG // #define CUDA_DBG1 __global__ void d_make_interpolate_420_1 ( int *input, int *output, int xdim, int ydim, int zdim, int frsize, int nxdim, int nydim, int nfrsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / nfrsize ; i = t_idx % nfrsize ; row_idx = i / nxdim ; if (!( row_idx & 1 )) { row_idx >>= 1 ; i %= nxdim ; column_idx = ( i >> 1 ) ; i = frame_n * frsize + row_idx * xdim + column_idx ; output[ t_idx ] = input [ i ] ; } t_idx += CUDA_MAX_THREADS ; } } // xdim/ydim/frsize are all for the new interpolated data __global__ void d_make_interpolate_420_2 ( int *input, int xdim, int ydim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / frsize ; i = t_idx % frsize ; row_idx = i / xdim ; if ( row_idx & 1 ) { from_row_1 = row_idx - 1 ; from_row_2 = row_idx + 1 ; column_idx = i % xdim ; if ( from_row_2 == ydim ) { input[ t_idx ] = input [ frame_n * frsize + from_row_1 * xdim + column_idx ] ; } else { input[ t_idx ] = ( input [ frame_n * frsize + from_row_1 * xdim + column_idx ] + input [ frame_n * frsize + from_row_2 * xdim + column_idx ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } // take care of the columns __global__ void d_make_interpolate_420_3 ( int *input, int xdim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / frsize ; i = 
t_idx % frsize ; row_idx = i / xdim ; column_idx = i % xdim ; if ( column_idx & 1 ) { from_col_1 = column_idx - 1 ; from_col_2 = column_idx + 1 ; if ( from_col_2 != xdim ) { input[ t_idx ] = ( input [ frame_n * frsize + row_idx * xdim + from_col_1 ] + input [ frame_n * frsize + row_idx * xdim + from_col_2 ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } /* input : device addr ... also the output addr ... pls note output : device addr xdim : x dimension of frame ydim : y dimension of frame zdim : z dimension of frame, i.e. temporal scheme : INT_YUV420 currently */ int h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim, int zdim, int scheme #ifdef CUDA_OBS , int *cudadbgp #endif ) { int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int oframe_size, nframe_size, nn, nBlocks ; switch ( scheme ) { case INT_YUV420 : oframe_size = xdim * ydim ; nframe_size = oframe_size * 4 ; // YUV420 nn = nframe_size * zdim ; // nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ; h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ; #ifdef CUDA_DBG fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n", __func__, d_input, d_output, xdim, ydim, zdim, scheme ) ; #endif d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> ( d_input, d_output, xdim, ydim, zdim, oframe_size, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ; #endif d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ; #endif d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ; #endif break ; default : return 
( 0 ) ; } return ( 1 ) ; }
# ---------------------------------------------------------------------------
# Host-side x86-64 assembly (AT&T/GAS syntax, System V AMD64 ABI) emitted by
# GCC 13.3 for the nvcc-generated cudafe1.cpp translation unit of
# cs_interpolate.  Contains:
#   * the kernel-launch device stubs (__device_stub__*) that marshal kernel
#     arguments and call cudaLaunchKernel,
#   * host-callable kernel symbols that forward to those stubs,
#   * h_make_interpolate(), the host driver for the three interpolation passes,
#   * the CUDA fatbinary registration/unregistration machinery.
# Stack-protector canaries (%fs:40) guard every stub frame.
# ---------------------------------------------------------------------------
	.file	"tmpxft_00075439_00000000-6_cs_interpolate.cudafe1.cpp"
	.text
#APP
#NO_APP
# static void __cudaUnregisterBinaryUtil(void)
# atexit() handler: unregisters this module's fatbinary handle.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Device stub for d_make_interpolate_420_1(int*,int*,int,int,int,int,int,int,int,int).
# Args 1-6 arrive in rdi/rsi/edx/ecx/r8d/r9d; args 7-10 arrive on the caller's
# stack (208/216/224/232(%rsp) after our 200-byte frame).  Builds the
# void* argument-pointer array at 96..168(%rsp), pops the <<< >>> launch
# configuration, then calls cudaLaunchKernel.
	.globl	_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii
	.type	_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii, @function
_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii:
.LFB2082:
	.cfi_startproc
	endbr64
	subq	$200, %rsp
	.cfi_def_cfa_offset 208
	movq	%rdi, 24(%rsp)			# spill register args so they are addressable
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movl	%ecx, 8(%rsp)
	movl	%r8d, 4(%rsp)
	movl	%r9d, (%rsp)
	movq	%fs:40, %rax			# stack-protector canary
	movq	%rax, 184(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0..9]: pointers to each spilled argument
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 128(%rsp)
	movq	%rsp, %rax
	movq	%rax, 136(%rsp)
	leaq	208(%rsp), %rax			# stack-passed args 7..10
	movq	%rax, 144(%rsp)
	leaq	216(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	224(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	232(%rsp), %rax
	movq	%rax, 168(%rsp)
	movl	$1, 48(%rsp)			# default gridDim/blockDim = (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7				# 0 => configuration available, do the launch
.L3:
	movq	184(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L8
	addq	$200, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	40(%rsp)			# stream
	.cfi_def_cfa_offset 216
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 224
	leaq	112(%rsp), %r9			# args array (96(%rsp) before the two pushes)
	movq	76(%rsp), %rcx			# blockDim.x/.y
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.x/.y
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z24d_make_interpolate_420_1PiS_iiiiiiii(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 208
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2082:
	.size	_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii, .-_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii
# Host entry for kernel d_make_interpolate_420_1: re-pushes the four
# stack-passed arguments (copying its own incoming stack slots) and calls the
# device stub.  Register args pass through unchanged.
	.globl	_Z24d_make_interpolate_420_1PiS_iiiiiiii
	.type	_Z24d_make_interpolate_420_1PiS_iiiiiiii, @function
_Z24d_make_interpolate_420_1PiS_iiiiiiii:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movl	40(%rsp), %eax			# each push shifts rsp, so 40(%rsp) walks the
	pushq	%rax				# incoming stack args in reverse order
	.cfi_def_cfa_offset 24
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 32
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 40
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 48
	call	_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2083:
	.size	_Z24d_make_interpolate_420_1PiS_iiiiiiii, .-_Z24d_make_interpolate_420_1PiS_iiiiiiii
# Device stub for d_make_interpolate_420_2(int*,int,int,int,int).
# All five arguments arrive in registers; same pop-config / launch pattern
# as the first stub, with a 152-byte frame.
	.globl	_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii
	.type	_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii, @function
_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)
	movl	%esi, 20(%rsp)
	movl	%edx, 16(%rsp)
	movl	%ecx, 12(%rsp)
	movl	%r8d, 8(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	20(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 128(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L15
.L11:
	movq	136(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L16
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	pushq	40(%rsp)			# stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z24d_make_interpolate_420_2Piiiii(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L11
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2084:
	.size	_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii, .-_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii
# Host entry for kernel d_make_interpolate_420_2: all args in registers,
# simple forward to the stub.
	.globl	_Z24d_make_interpolate_420_2Piiiii
	.type	_Z24d_make_interpolate_420_2Piiiii, @function
_Z24d_make_interpolate_420_2Piiiii:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2085:
	.size	_Z24d_make_interpolate_420_2Piiiii, .-_Z24d_make_interpolate_420_2Piiiii
# Device stub for d_make_interpolate_420_3(int*,int,int,int).
# Four register arguments; same pattern as above.
	.globl	_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii
	.type	_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii, @function
_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii:
.LFB2086:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)
	movl	%esi, 20(%rsp)
	movl	%edx, 16(%rsp)
	movl	%ecx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	20(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L23
.L19:
	movq	136(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L24
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L23:
	.cfi_restore_state
	pushq	40(%rsp)			# stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z24d_make_interpolate_420_3Piiii(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L19
.L24:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2086:
	.size	_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii, .-_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii
# Host entry for kernel d_make_interpolate_420_3.
	.globl	_Z24d_make_interpolate_420_3Piiii
	.type	_Z24d_make_interpolate_420_3Piiii, @function
_Z24d_make_interpolate_420_3Piiii:
.LFB2087:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2087:
	.size	_Z24d_make_interpolate_420_3Piiii, .-_Z24d_make_interpolate_420_3Piiii
# int h_make_interpolate(int *d_input, int *d_output,
#                        int xdim, int ydim, int zdim, int scheme)
# rdi=d_input rsi=d_output edx=xdim ecx=ydim r8d=zdim r9d=scheme.
# Returns 1 after running the three interpolation passes when scheme == 3
# (presumably INT_YUV420 -- confirm against cs_interpolate.h), else 0.
# Register roles in the body: ebx=xdim ebp=ydim r12d=zdim
# r15d=xdim*ydim (old frame size) r13d=4*r15d (new frame size)
# r14d=r13d*zdim (total element count).  Thread count 512 is hard-coded
# (presumably CUDA_MAX_THREADS_P_BLK -- confirm against cs_cuda.h).
	.globl	_Z18h_make_interpolatePiS_iiii
	.type	_Z18h_make_interpolatePiS_iiii, @function
_Z18h_make_interpolatePiS_iiii:
.LFB2057:
	.cfi_startproc
	endbr64
	pushq	%r15
	.cfi_def_cfa_offset 16
	.cfi_offset 15, -16
	pushq	%r14
	.cfi_def_cfa_offset 24
	.cfi_offset 14, -24
	pushq	%r13
	.cfi_def_cfa_offset 32
	.cfi_offset 13, -32
	pushq	%r12
	.cfi_def_cfa_offset 40
	.cfi_offset 12, -40
	pushq	%rbp
	.cfi_def_cfa_offset 48
	.cfi_offset 6, -48
	pushq	%rbx
	.cfi_def_cfa_offset 56
	.cfi_offset 3, -56
	subq	$72, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 8(%rsp)			# save d_input
	movq	%rsi, (%rsp)			# save d_output
	movq	%fs:40, %rax			# stack-protector canary
	movq	%rax, 56(%rsp)
	xorl	%eax, %eax			# default return value 0
	cmpl	$3, %r9d			# scheme == 3 ?
	je	.L35
.L27:
	movq	56(%rsp), %rdx			# canary check + epilogue
	subq	%fs:40, %rdx
	jne	.L36
	addq	$72, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%rbp
	.cfi_def_cfa_offset 40
	popq	%r12
	.cfi_def_cfa_offset 32
	popq	%r13
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	ret
.L35:
	.cfi_restore_state
	movl	%edx, %ebx			# ebx = xdim
	movl	%ecx, %ebp			# ebp = ydim
	movl	%r8d, %r12d			# r12d = zdim
	movl	%edx, %r15d
	imull	%ecx, %r15d			# r15d = xdim*ydim (old frame size)
	leal	0(,%r15,4), %r13d		# r13d = 4*old frame size (new frame size)
	movl	%r13d, %r14d
	imull	%r8d, %r14d			# r14d = total elements (new size * zdim)
	leaq	28(%rsp), %rdx			# &nBlocks
	movl	$512, %esi			# threads per block
	movl	%r14d, %edi
	call	_Z11h_block_adjiiPi@PLT
	movl	$512, 44(%rsp)			# blockDim = (512,1,1)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	28(%rsp), %eax			# gridDim = (nBlocks,1,1)
	movl	%eax, 32(%rsp)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$0, %r9d			# stream = 0
	movl	$0, %r8d			# sharedMem = 0
	movq	44(%rsp), %rdx
	movl	$1, %ecx
	movq	32(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L37				# 0 => config pushed, launch pass 1
.L29:
	call	cudaThreadSynchronize@PLT
	movl	$512, 44(%rsp)			# same launch configuration for pass 2
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	28(%rsp), %eax
	movl	%eax, 32(%rsp)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$0, %r9d
	movl	$0, %r8d
	movq	44(%rsp), %rdx
	movl	$1, %ecx
	movq	32(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L38
.L30:
	call	cudaThreadSynchronize@PLT
	movl	$512, 44(%rsp)			# same launch configuration for pass 3
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	28(%rsp), %eax
	movl	%eax, 32(%rsp)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$0, %r9d
	movl	$0, %r8d
	movq	44(%rsp), %rdx
	movl	$1, %ecx
	movq	32(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L39
.L31:
	call	cudaThreadSynchronize@PLT
	movl	$1, %eax			# success
	jmp	.L27
.L37:
	pushq	%r14				# pass 1: stack args size, nfrsize
	.cfi_def_cfa_offset 136
	pushq	%r13
	.cfi_def_cfa_offset 144
	leal	(%rbp,%rbp), %eax		# nydim = 2*ydim
	pushq	%rax
	.cfi_def_cfa_offset 152
	leal	(%rbx,%rbx), %eax		# nxdim = 2*xdim
	pushq	%rax
	.cfi_def_cfa_offset 160
	movl	%r15d, %r9d			# frsize
	movl	%r12d, %r8d			# zdim
	movl	%ebp, %ecx			# ydim
	movl	%ebx, %edx			# xdim
	movq	32(%rsp), %rsi			# d_output (0(%rsp) before the 4 pushes)
	movq	40(%rsp), %rdi			# d_input (8(%rsp) before the 4 pushes)
	call	_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii
	addq	$32, %rsp
	.cfi_def_cfa_offset 128
	jmp	.L29
.L38:
	leal	(%rbp,%rbp), %edx		# pass 2: (d_output, 2*xdim, 2*ydim, nfrsize, size)
	leal	(%rbx,%rbx), %esi
	movl	%r14d, %r8d
	movl	%r13d, %ecx
	movq	(%rsp), %rdi
	call	_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii
	jmp	.L30
.L39:
	leal	(%rbx,%rbx), %esi		# pass 3: (d_output, 2*xdim, nfrsize, size)
	movl	%r14d, %ecx
	movl	%r13d, %edx
	movq	(%rsp), %rdi
	call	_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii
	jmp	.L31
.L36:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2057:
	.size	_Z18h_make_interpolatePiS_iiii, .-_Z18h_make_interpolatePiS_iiii
# Mangled kernel names used to register each kernel with the CUDA runtime.
	.section	.rodata.str1.8,"aMS",@progbits,1
	.align 8
.LC0:
	.string	"_Z24d_make_interpolate_420_3Piiii"
	.align 8
.LC1:
	.string	"_Z24d_make_interpolate_420_2Piiiii"
	.align 8
.LC2:
	.string	"_Z24d_make_interpolate_420_1PiS_iiiiiiii"
	.text
# static void __sti____cudaRegisterAll(void)
# Static initializer (placed in .init_array): registers the fatbinary and
# each kernel's host-side address/name with the CUDA runtime, then schedules
# unregistration via atexit.  The four pushq $0 before each
# __cudaRegisterFunction are its NULL dim3*/size_t* trailing parameters.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rbx			# keep handle for subsequent calls
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC0(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z24d_make_interpolate_420_3Piiii(%rip), %rsi
	movq	%rax, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC1(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z24d_make_interpolate_420_2Piiiii(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC2(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z24d_make_interpolate_420_1PiS_iiiiiiii(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2089:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv
# Fatbinary wrapper descriptor: magic 0x466243b1, version 1, pointer to the
# embedded device code blob (fatbinData lives in another translation unit).
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
	.long	1180844977
	.long	1
	.quad	fatbinData
	.quad	0
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8
	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f
	.long	4f - 1f
	.long	5
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002
	.long	3f - 2f
2:
	.long	0x3
3:
	.align 8
4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include "cs_cuda.h" #include "cs_helper.h" #include "cs_dbg.h" #include "cs_interpolate.h" // #define CUDA_DBG // #define CUDA_DBG1 __global__ void d_make_interpolate_420_1 ( int *input, int *output, int xdim, int ydim, int zdim, int frsize, int nxdim, int nydim, int nfrsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / nfrsize ; i = t_idx % nfrsize ; row_idx = i / nxdim ; if (!( row_idx & 1 )) { row_idx >>= 1 ; i %= nxdim ; column_idx = ( i >> 1 ) ; i = frame_n * frsize + row_idx * xdim + column_idx ; output[ t_idx ] = input [ i ] ; } t_idx += CUDA_MAX_THREADS ; } } // xdim/ydim/frsize are all for the new interpolated data __global__ void d_make_interpolate_420_2 ( int *input, int xdim, int ydim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / frsize ; i = t_idx % frsize ; row_idx = i / xdim ; if ( row_idx & 1 ) { from_row_1 = row_idx - 1 ; from_row_2 = row_idx + 1 ; column_idx = i % xdim ; if ( from_row_2 == ydim ) { input[ t_idx ] = input [ frame_n * frsize + from_row_1 * xdim + column_idx ] ; } else { input[ t_idx ] = ( input [ frame_n * frsize + from_row_1 * xdim + column_idx ] + input [ frame_n * frsize + from_row_2 * xdim + column_idx ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } // take care of the columns __global__ void d_make_interpolate_420_3 ( int *input, int xdim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / frsize ; i = 
t_idx % frsize ; row_idx = i / xdim ; column_idx = i % xdim ; if ( column_idx & 1 ) { from_col_1 = column_idx - 1 ; from_col_2 = column_idx + 1 ; if ( from_col_2 != xdim ) { input[ t_idx ] = ( input [ frame_n * frsize + row_idx * xdim + from_col_1 ] + input [ frame_n * frsize + row_idx * xdim + from_col_2 ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } /* input : device addr ... also the output addr ... pls note output : device addr xdim : x dimension of frame ydim : y dimension of frame zdim : z dimension of frame, i.e. temporal scheme : INT_YUV420 currently */ int h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim, int zdim, int scheme #ifdef CUDA_OBS , int *cudadbgp #endif ) { int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int oframe_size, nframe_size, nn, nBlocks ; switch ( scheme ) { case INT_YUV420 : oframe_size = xdim * ydim ; nframe_size = oframe_size * 4 ; // YUV420 nn = nframe_size * zdim ; // nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ; h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ; #ifdef CUDA_DBG fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n", __func__, d_input, d_output, xdim, ydim, zdim, scheme ) ; #endif d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> ( d_input, d_output, xdim, ydim, zdim, oframe_size, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ; #endif d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ; #endif d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; cudaThreadSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ; #endif break ; default : return 
( 0 ) ; } return ( 1 ) ; }
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_dbg.h"
#include "cs_interpolate.h"

// #define CUDA_DBG
// #define CUDA_DBG1

// Pass 1 of the YUV420 2x-upscale interpolation (HIP port).
// Fills every even row of the enlarged frames in `output` by duplicating
// source pixels from `input` (output column c takes source column c>>1);
// odd rows are produced by pass 2 and odd columns refined by pass 3.
// xdim/ydim/zdim/frsize describe the source frames, nxdim/nydim/nfrsize the
// enlarged frames, `size` the total element count of the enlarged volume.
// Each thread strides by CUDA_MAX_THREADS (defined in cs_cuda.h -- value
// not visible here).
__global__ void d_make_interpolate_420_1 ( int *input, int *output,
	int xdim, int ydim, int zdim, int frsize,
	int nxdim, int nydim, int nfrsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / nfrsize ;	// frame index in the enlarged volume
		i = t_idx % nfrsize ;		// offset within the enlarged frame
		row_idx = i / nxdim ;
		if (!( row_idx & 1 ))		// even rows copy from the source frame
		{
			row_idx >>= 1 ;
			i %= nxdim ;
			column_idx = ( i >> 1 ) ;
			i = frame_n * frsize + row_idx * xdim + column_idx ;
			output[ t_idx ] = input [ i ] ;
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// Pass 2: fills every odd row of the enlarged frames in place, averaging the
// rows directly above and below; the last row (no row below) copies the row
// above.  xdim/ydim/frsize are all for the new interpolated data.
__global__ void d_make_interpolate_420_2 ( int *input,
	int xdim, int ydim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;
		if ( row_idx & 1 )
		{
			from_row_1 = row_idx - 1 ;
			from_row_2 = row_idx + 1 ;
			column_idx = i % xdim ;
			if ( from_row_2 == ydim )	// bottom edge: duplicate the row above
			{
				input[ t_idx ] = input [ frame_n * frsize +
					from_row_1 * xdim + column_idx ] ;
			}
			else
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
						from_row_1 * xdim + column_idx ] +
					input [ frame_n * frsize +
						from_row_2 * xdim + column_idx ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// take care of the columns
// Pass 3: replaces every odd column in place with the average of its
// horizontal neighbors.  NOTE(review): the rightmost odd column
// (from_col_2 == xdim) is left untouched rather than duplicating its left
// neighbor -- asymmetric with pass 2's edge handling; confirm intent.
__global__ void d_make_interpolate_420_3 ( int *input,
	int xdim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;
		column_idx = i % xdim ;
		if ( column_idx & 1 )
		{
			from_col_1 = column_idx - 1 ;
			from_col_2 = column_idx + 1 ;
			if ( from_col_2 != xdim )
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
						row_idx * xdim + from_col_1 ] +
					input [ frame_n * frsize +
						row_idx * xdim + from_col_2 ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

/*
input : device addr ... also the output addr ... pls note
output : device addr
xdim : x dimension of frame
ydim : y dimension of frame
zdim : z dimension of frame, i.e. temporal
scheme : INT_YUV420 currently

Host driver: runs the three interpolation passes on the device with a
device-wide synchronize after each.  Returns 1 on success, 0 for an
unsupported scheme.
*/
int
h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim,
	int zdim, int scheme
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int oframe_size, nframe_size, nn, nBlocks ;

	switch ( scheme ) {
	case INT_YUV420 :
		oframe_size = xdim * ydim ;
		nframe_size = oframe_size * 4 ;	// YUV420
		nn = nframe_size * zdim ;

		// nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ;

#ifdef CUDA_DBG
		fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n",
			__func__, d_input, d_output, xdim, ydim, zdim, scheme ) ;
#endif

		d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> (
			d_input, d_output, xdim, ydim, zdim, oframe_size,
			xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ;
#endif

		d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ;
#endif

		d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ;
#endif
		break ;

	default :
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_dbg.h"
#include "cs_interpolate.h"

// #define CUDA_DBG
// #define CUDA_DBG1

// Pass 1 of the YUV420 2x-upscale interpolation (HIP port).
// Fills every even row of the enlarged frames in `output` by duplicating
// source pixels from `input` (output column c takes source column c>>1);
// odd rows are produced by pass 2 and odd columns refined by pass 3.
// xdim/ydim/zdim/frsize describe the source frames, nxdim/nydim/nfrsize the
// enlarged frames, `size` the total element count of the enlarged volume.
// Each thread strides by CUDA_MAX_THREADS (defined in cs_cuda.h -- value
// not visible here).
__global__ void d_make_interpolate_420_1 ( int *input, int *output,
	int xdim, int ydim, int zdim, int frsize,
	int nxdim, int nydim, int nfrsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / nfrsize ;	// frame index in the enlarged volume
		i = t_idx % nfrsize ;		// offset within the enlarged frame
		row_idx = i / nxdim ;
		if (!( row_idx & 1 ))		// even rows copy from the source frame
		{
			row_idx >>= 1 ;
			i %= nxdim ;
			column_idx = ( i >> 1 ) ;
			i = frame_n * frsize + row_idx * xdim + column_idx ;
			output[ t_idx ] = input [ i ] ;
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// Pass 2: fills every odd row of the enlarged frames in place, averaging the
// rows directly above and below; the last row (no row below) copies the row
// above.  xdim/ydim/frsize are all for the new interpolated data.
__global__ void d_make_interpolate_420_2 ( int *input,
	int xdim, int ydim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;
		if ( row_idx & 1 )
		{
			from_row_1 = row_idx - 1 ;
			from_row_2 = row_idx + 1 ;
			column_idx = i % xdim ;
			if ( from_row_2 == ydim )	// bottom edge: duplicate the row above
			{
				input[ t_idx ] = input [ frame_n * frsize +
					from_row_1 * xdim + column_idx ] ;
			}
			else
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
						from_row_1 * xdim + column_idx ] +
					input [ frame_n * frsize +
						from_row_2 * xdim + column_idx ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// take care of the columns
// Pass 3: replaces every odd column in place with the average of its
// horizontal neighbors.  NOTE(review): the rightmost odd column
// (from_col_2 == xdim) is left untouched rather than duplicating its left
// neighbor -- asymmetric with pass 2's edge handling; confirm intent.
__global__ void d_make_interpolate_420_3 ( int *input,
	int xdim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;
		column_idx = i % xdim ;
		if ( column_idx & 1 )
		{
			from_col_1 = column_idx - 1 ;
			from_col_2 = column_idx + 1 ;
			if ( from_col_2 != xdim )
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
						row_idx * xdim + from_col_1 ] +
					input [ frame_n * frsize +
						row_idx * xdim + from_col_2 ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

/*
input : device addr ... also the output addr ... pls note
output : device addr
xdim : x dimension of frame
ydim : y dimension of frame
zdim : z dimension of frame, i.e. temporal
scheme : INT_YUV420 currently

Host driver: runs the three interpolation passes on the device with a
device-wide synchronize after each.  Returns 1 on success, 0 for an
unsupported scheme.
*/
int
h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim,
	int zdim, int scheme
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int oframe_size, nframe_size, nn, nBlocks ;

	switch ( scheme ) {
	case INT_YUV420 :
		oframe_size = xdim * ydim ;
		nframe_size = oframe_size * 4 ;	// YUV420
		nn = nframe_size * zdim ;

		// nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ;

#ifdef CUDA_DBG
		fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n",
			__func__, d_input, d_output, xdim, ydim, zdim, scheme ) ;
#endif

		d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> (
			d_input, d_output, xdim, ydim, zdim, oframe_size,
			xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ;
#endif

		d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ;
#endif

		d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		hipDeviceSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ;
#endif
		break ;

	default :
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z24d_make_interpolate_420_1PiS_iiiiiiii .globl _Z24d_make_interpolate_420_1PiS_iiiiiiii .p2align 8 .type _Z24d_make_interpolate_420_1PiS_iiiiiiii,@function _Z24d_make_interpolate_420_1PiS_iiiiiiii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x3c s_load_b32 s8, s[0:1], 0x2c s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s8, v1 s_cbranch_execz .LBB0_5 s_clause 0x1 s_load_b32 s9, s[0:1], 0x28 s_load_b64 s[2:3], s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_ashr_i32 s10, s9, 31 s_ashr_i32 s11, s3, 31 s_add_i32 s4, s9, s10 s_add_i32 s5, s3, s11 s_xor_b32 s12, s4, s10 s_xor_b32 s13, s5, s11 v_cvt_f32_u32_e32 v0, s12 v_cvt_f32_u32_e32 v2, s13 s_sub_i32 s4, 0, s12 s_sub_i32 s5, 0, s13 s_sub_i32 s14, 0, s9 v_rcp_iflag_f32_e32 v0, v0 v_rcp_iflag_f32_e32 v2, v2 s_sub_i32 s3, 0, s3 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_mul_f32_e32 v2, 0x4f7ffffe, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_u32_f32_e32 v0, v0 v_cvt_u32_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, s4, v0 v_mul_lo_u32 v4, s5, v2 s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b32 s0, s[0:1], 0x10 s_mov_b32 s1, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v3, v0, v3 v_mul_hi_u32 v4, v2, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v0, v0, v3 v_add_nc_u32_e32 v3, v2, v4 s_branch .LBB0_3 .LBB0_2: s_or_b32 exec_lo, exec_lo, s15 v_add_nc_u32_e32 v1, 0x1fffe00, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_le_i32_e32 vcc_lo, s8, v1 s_or_b32 s1, vcc_lo, s1 s_and_not1_b32 exec_lo, exec_lo, s1 s_cbranch_execz .LBB0_5 .LBB0_3: v_ashrrev_i32_e32 v2, 31, v1 s_mov_b32 s15, exec_lo 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v1, v2 v_xor_b32_e32 v4, v4, v2 v_xor_b32_e32 v2, s10, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v5, v4, v0 v_mul_lo_u32 v6, v5, s12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v4, v4, v6 v_add_nc_u32_e32 v6, 1, v5 v_subrev_nc_u32_e32 v7, s12, v4 v_cmp_le_u32_e32 vcc_lo, s12, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_cndmask_b32 v5, v5, v6 :: v_dual_cndmask_b32 v4, v4, v7 v_add_nc_u32_e32 v6, 1, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s12, v4 v_cndmask_b32_e32 v4, v5, v6, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_xor_b32_e32 v4, v4, v2 v_sub_nc_u32_e32 v2, v4, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[4:5], null, s14, v2, v[1:2] v_mul_lo_u32 v5, v2, s9 v_ashrrev_i32_e32 v4, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v5, v4, v5 v_add_nc_u32_e32 v5, v1, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_xor_b32_e32 v5, v5, v4 v_xor_b32_e32 v4, s11, v4 v_mul_hi_u32 v6, v5, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v7, v6, s13 v_sub_nc_u32_e32 v5, v5, v7 v_add_nc_u32_e32 v7, 1, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v8, s13, v5 v_cmp_le_u32_e32 vcc_lo, s13, v5 v_dual_cndmask_b32 v6, v6, v7 :: v_dual_cndmask_b32 v5, v5, v8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v7, 1, v6 v_cmp_le_u32_e32 vcc_lo, s13, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v5, v6, v7, vcc_lo v_xor_b32_e32 v5, v5, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) 
| instid1(VALU_DEP_1) v_sub_nc_u32_e32 v4, v5, v4 v_and_b32_e32 v5, 1, v4 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v5 s_cbranch_execz .LBB0_2 v_mul_lo_u32 v5, s14, v2 v_mul_lo_u32 v6, s3, v4 v_ashrrev_i32_e32 v4, 1, v4 v_mul_lo_u32 v2, v2, s2 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_lo_u32 v4, v4, s0 v_add3_u32 v5, v1, v6, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v5, 1, v5 v_add3_u32 v4, v2, v4, v5 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v5, 31, v4 v_lshlrev_b64 v[4:5], 2, v[4:5] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo global_load_b32 v6, v[4:5], off v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[4:5], v6, off s_branch .LBB0_2 .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z24d_make_interpolate_420_1PiS_iiiiiiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 
.amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z24d_make_interpolate_420_1PiS_iiiiiiii, .Lfunc_end0-_Z24d_make_interpolate_420_1PiS_iiiiiiii .section .AMDGPU.csdata,"",@progbits .text .protected _Z24d_make_interpolate_420_2Piiiii .globl _Z24d_make_interpolate_420_2Piiiii .p2align 8 .type _Z24d_make_interpolate_420_2Piiiii,@function _Z24d_make_interpolate_420_2Piiiii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s4, s[0:1], 0x14 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s4, v1 s_cbranch_execz .LBB1_7 s_clause 0x1 s_load_b32 s6, s[0:1], 0x10 s_load_b128 s[0:3], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_ashr_i32 s7, s6, 31 s_ashr_i32 s5, s2, 31 s_add_i32 s6, s6, s7 s_add_i32 s8, s2, s5 s_xor_b32 s6, s6, s7 s_xor_b32 s7, s8, s5 v_cvt_f32_u32_e32 v0, s6 v_cvt_f32_u32_e32 v2, s7 s_sub_i32 s8, 0, s6 s_sub_i32 s9, 0, s7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v0, v0 v_rcp_iflag_f32_e32 v2, v2 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_mul_f32_e32 v2, 0x4f7ffffe, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_u32_f32_e32 v0, v0 v_cvt_u32_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, s8, v0 v_mul_lo_u32 v4, s9, v2 s_mov_b32 s8, 0 s_sub_i32 s9, 0, s2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_2) v_mul_hi_u32 v3, v0, v3 v_mul_hi_u32 v4, v2, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v0, v0, v3 v_add_nc_u32_e32 v3, v2, v4 s_branch .LBB1_4 .LBB1_2: s_or_b32 exec_lo, exec_lo, s11 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[1:2] v_add_co_u32 v5, vcc_lo, s0, v5 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[5:6], v4, off .LBB1_3: s_or_b32 exec_lo, exec_lo, s10 v_add_nc_u32_e32 v1, 0x1fffe00, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_le_i32_e32 vcc_lo, s4, v1 s_or_b32 s8, vcc_lo, s8 s_and_not1_b32 exec_lo, exec_lo, s8 s_cbranch_execz .LBB1_7 .LBB1_4: v_ashrrev_i32_e32 v2, 31, v1 s_mov_b32 s10, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v1, v2 v_xor_b32_e32 v4, v4, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v5, v4, v0 v_mul_lo_u32 v5, v5, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v4, v4, v5 v_subrev_nc_u32_e32 v5, s6, v4 v_cmp_le_u32_e32 vcc_lo, s6, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v6, v4, v5, vcc_lo v_subrev_nc_u32_e32 v7, s6, v6 v_cmp_le_u32_e32 vcc_lo, s6, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v6, v6, v7, vcc_lo v_xor_b32_e32 v6, v6, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v6, v6, v2 v_ashrrev_i32_e32 v7, 31, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v8, v6, v7 v_xor_b32_e32 v8, v8, v7 v_xor_b32_e32 v7, s5, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v9, v8, v3 v_mul_lo_u32 v10, v9, s7 s_delay_alu 
instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v8, v8, v10 v_add_nc_u32_e32 v10, 1, v9 v_subrev_nc_u32_e32 v11, s7, v8 v_cmp_le_u32_e32 vcc_lo, s7, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_cndmask_b32 v9, v9, v10 :: v_dual_cndmask_b32 v8, v8, v11 v_add_nc_u32_e32 v10, 1, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s7, v8 v_cndmask_b32_e32 v8, v9, v10, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_xor_b32_e32 v8, v8, v7 v_sub_nc_u32_e32 v7, v8, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_and_b32_e32 v8, 1, v7 v_cmpx_eq_u32_e32 1, v8 s_cbranch_execz .LBB1_3 v_cmp_le_u32_e32 vcc_lo, s6, v4 s_mov_b32 s11, exec_lo v_cndmask_b32_e32 v4, v4, v5, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v5, s6, v4 v_cmp_le_u32_e32 vcc_lo, s6, v4 v_dual_cndmask_b32 v4, v4, v5 :: v_dual_add_nc_u32 v5, v6, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_xor_b32_e32 v2, v4, v2 v_sub_nc_u32_e32 v2, v5, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, s9, v1, v2 v_ashrrev_i32_e32 v5, 31, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, vcc_lo, s0, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo global_load_b32 v4, v[4:5], off v_add_nc_u32_e32 v5, 1, v7 v_cmpx_ne_u32_e64 s3, v5 s_cbranch_execz .LBB1_2 v_add3_u32 v5, s2, v1, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v5 v_lshlrev_b64 v[5:6], 2, v[5:6] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, s0, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo global_load_b32 v2, v[5:6], off 
s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v2, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshrrev_b32_e32 v4, 31, v2 v_add_nc_u32_e32 v2, v2, v4 s_delay_alu instid0(VALU_DEP_1) v_ashrrev_i32_e32 v4, 1, v2 s_branch .LBB1_2 .LBB1_7: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z24d_make_interpolate_420_2Piiiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 12 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z24d_make_interpolate_420_2Piiiii, .Lfunc_end1-_Z24d_make_interpolate_420_2Piiiii .section .AMDGPU.csdata,"",@progbits .text .protected _Z24d_make_interpolate_420_3Piiii .globl _Z24d_make_interpolate_420_3Piiii .p2align 8 .type _Z24d_make_interpolate_420_3Piiii,@function _Z24d_make_interpolate_420_3Piiii: s_clause 0x1 s_load_b32 s3, s[0:1], 0x24 
s_load_b32 s2, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1] s_mov_b32 s3, exec_lo v_cmpx_gt_i32_e64 s2, v1 s_cbranch_execz .LBB2_5 s_load_b128 s[4:7], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_add_u32 s1, s4, 4 s_addc_u32 s3, s5, 0 s_ashr_i32 s0, s7, 31 s_ashr_i32 s8, s6, 31 s_add_i32 s7, s7, s0 s_add_i32 s9, s6, s8 s_xor_b32 s7, s7, s0 s_xor_b32 s8, s9, s8 v_cvt_f32_u32_e32 v0, s7 v_cvt_f32_u32_e32 v2, s8 s_sub_i32 s0, 0, s7 s_sub_i32 s9, 0, s8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v0, v0 v_rcp_iflag_f32_e32 v2, v2 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_mul_f32_e32 v2, 0x4f7ffffe, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_u32_f32_e32 v0, v0 v_cvt_u32_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, s0, v0 v_mul_lo_u32 v4, s9, v2 s_mov_b32 s9, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v3, v0, v3 v_mul_hi_u32 v4, v2, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v0, v0, v3 v_add_nc_u32_e32 v3, v2, v4 s_branch .LBB2_3 .LBB2_2: s_or_b32 exec_lo, exec_lo, s0 v_add_nc_u32_e32 v1, 0x1fffe00, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_cmp_le_i32_e32 vcc_lo, s2, v1 s_or_b32 s9, vcc_lo, s9 s_and_not1_b32 exec_lo, exec_lo, s9 s_cbranch_execz .LBB2_5 .LBB2_3: v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v1, v2 v_xor_b32_e32 v4, v4, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v5, v4, v0 v_mul_lo_u32 v5, v5, s7 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v4, v4, v5 v_subrev_nc_u32_e32 v5, s7, v4 
v_cmp_le_u32_e32 vcc_lo, s7, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v4, v4, v5, vcc_lo v_subrev_nc_u32_e32 v5, s7, v4 v_cmp_le_u32_e32 vcc_lo, s7, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v4, v4, v5, vcc_lo v_xor_b32_e32 v4, v4, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v2, v4, v2 v_ashrrev_i32_e32 v4, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v2, v2, v4 v_xor_b32_e32 v2, v2, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v5, v2, v3 v_mul_lo_u32 v5, v5, s8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v2, v2, v5 v_subrev_nc_u32_e32 v5, s8, v2 v_cmp_le_u32_e32 vcc_lo, s8, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v2, v2, v5, vcc_lo v_subrev_nc_u32_e32 v5, s8, v2 v_cmp_le_u32_e32 vcc_lo, s8, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v2, v2, v5, vcc_lo v_xor_b32_e32 v2, v2, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v2, v2, v4 v_and_b32_e32 v4, 1, v2 v_add_nc_u32_e32 v2, 1, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_eq_u32_e32 vcc_lo, 1, v4 v_cmp_ne_u32_e64 s0, s6, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s10, vcc_lo, s0 s_and_saveexec_b32 s0, s10 s_cbranch_execz .LBB2_2 v_add_nc_u32_e32 v4, -1, v1 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v5, 31, v4 v_lshlrev_b64 v[6:7], 2, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, vcc_lo, s4, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) 
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v8, vcc_lo, s1, v6 v_add_co_ci_u32_e32 v9, vcc_lo, s3, v7, vcc_lo s_clause 0x1 global_load_b32 v2, v[4:5], off global_load_b32 v4, v[8:9], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v4, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshrrev_b32_e32 v4, 31, v2 v_add_nc_u32_e32 v2, v2, v4 v_add_co_u32 v4, vcc_lo, s4, v6 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v7, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_ashrrev_i32_e32 v2, 1, v2 global_store_b32 v[4:5], v2, off s_branch .LBB2_2 .LBB2_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z24d_make_interpolate_420_3Piiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z24d_make_interpolate_420_3Piiii, 
.Lfunc_end2-_Z24d_make_interpolate_420_3Piiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 44 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z24d_make_interpolate_420_1PiS_iiiiiiii .private_segment_fixed_size: 0 
.sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z24d_make_interpolate_420_1PiS_iiiiiiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z24d_make_interpolate_420_2Piiiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z24d_make_interpolate_420_2Piiiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 12 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: 
hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z24d_make_interpolate_420_3Piiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z24d_make_interpolate_420_3Piiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include "cs_cuda.h" #include "cs_helper.h" #include "cs_dbg.h" #include "cs_interpolate.h" // #define CUDA_DBG // #define CUDA_DBG1 __global__ void d_make_interpolate_420_1 ( int *input, int *output, int xdim, int ydim, int zdim, int frsize, int nxdim, int nydim, int nfrsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / nfrsize ; i = t_idx % nfrsize ; row_idx = i / nxdim ; if (!( row_idx & 1 )) { row_idx >>= 1 ; i %= nxdim ; column_idx = ( i >> 1 ) ; i = frame_n * frsize + row_idx * xdim + column_idx ; output[ t_idx ] = input [ i ] ; } t_idx += CUDA_MAX_THREADS ; } } // xdim/ydim/frsize are all for the new interpolated data __global__ void d_make_interpolate_420_2 ( int *input, int xdim, int ydim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { frame_n = t_idx / frsize ; i = t_idx % frsize ; row_idx = i / xdim ; if ( row_idx & 1 ) { from_row_1 = row_idx - 1 ; from_row_2 = row_idx + 1 ; column_idx = i % xdim ; if ( from_row_2 == ydim ) { input[ t_idx ] = input [ frame_n * frsize + from_row_1 * xdim + column_idx ] ; } else { input[ t_idx ] = ( input [ frame_n * frsize + from_row_1 * xdim + column_idx ] + input [ frame_n * frsize + from_row_2 * xdim + column_idx ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } // take care of the columns __global__ void d_make_interpolate_420_3 ( int *input, int xdim, int frsize, int size #ifdef CUDA_OBS , int *cudadbgp #endif ) { int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ; int t_idx = blockIdx.x*blockDim.x + threadIdx.x; // the size is the total size on device while ( t_idx < size ) { 
frame_n = t_idx / frsize ; i = t_idx % frsize ; row_idx = i / xdim ; column_idx = i % xdim ; if ( column_idx & 1 ) { from_col_1 = column_idx - 1 ; from_col_2 = column_idx + 1 ; if ( from_col_2 != xdim ) { input[ t_idx ] = ( input [ frame_n * frsize + row_idx * xdim + from_col_1 ] + input [ frame_n * frsize + row_idx * xdim + from_col_2 ] ) / 2 ; } } t_idx += CUDA_MAX_THREADS ; } } /* input : device addr ... also the output addr ... pls note output : device addr xdim : x dimension of frame ydim : y dimension of frame zdim : z dimension of frame, i.e. temporal scheme : INT_YUV420 currently */ int h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim, int zdim, int scheme #ifdef CUDA_OBS , int *cudadbgp #endif ) { int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int oframe_size, nframe_size, nn, nBlocks ; switch ( scheme ) { case INT_YUV420 : oframe_size = xdim * ydim ; nframe_size = oframe_size * 4 ; // YUV420 nn = nframe_size * zdim ; // nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ; h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ; #ifdef CUDA_DBG fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n", __func__, d_input, d_output, xdim, ydim, zdim, scheme ) ; #endif d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> ( d_input, d_output, xdim, ydim, zdim, oframe_size, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; hipDeviceSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ; #endif d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, ydim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; hipDeviceSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ; #endif d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> ( d_output, xdim << 1, nframe_size, nn #ifdef CUDA_OBS , cudadbgp #endif ) ; hipDeviceSynchronize() ; #ifdef CUDA_DBG dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ; 
#endif break ; default : return ( 0 ) ; } return ( 1 ) ; }
.text .file "cs_interpolate.hip" .globl _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii,@function _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii: # @_Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 176(%rsp), %rax movq %rax, 128(%rsp) leaq 184(%rsp), %rax movq %rax, 136(%rsp) leaq 192(%rsp), %rax movq %rax, 144(%rsp) leaq 200(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end0: .size _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii, .Lfunc_end0-_Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .cfi_endproc # -- End function .globl _Z39__device_stub__d_make_interpolate_420_2Piiiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_2Piiiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_2Piiiii,@function _Z39__device_stub__d_make_interpolate_420_2Piiiii: # @_Z39__device_stub__d_make_interpolate_420_2Piiiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl 
%ecx, 12(%rsp) movl %r8d, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 12(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_2Piiiii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z39__device_stub__d_make_interpolate_420_2Piiiii, .Lfunc_end1-_Z39__device_stub__d_make_interpolate_420_2Piiiii .cfi_endproc # -- End function .globl _Z39__device_stub__d_make_interpolate_420_3Piiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_3Piiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_3Piiii,@function _Z39__device_stub__d_make_interpolate_420_3Piiii: # @_Z39__device_stub__d_make_interpolate_420_3Piiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl %ecx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 12(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_3Piiii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z39__device_stub__d_make_interpolate_420_3Piiii, .Lfunc_end2-_Z39__device_stub__d_make_interpolate_420_3Piiii .cfi_endproc # -- End function .globl 
_Z18h_make_interpolatePiS_iiii # -- Begin function _Z18h_make_interpolatePiS_iiii .p2align 4, 0x90 .type _Z18h_make_interpolatePiS_iiii,@function _Z18h_make_interpolatePiS_iiii: # @_Z18h_make_interpolatePiS_iiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %r13d, %r13d cmpl $3, %r9d jne .LBB3_8 # %bb.1: movl %ecx, %r15d movl %edx, %ebx movq %rsi, 120(%rsp) # 8-byte Spill movq %rdi, 208(%rsp) # 8-byte Spill movabsq $4294967296, %r14 # imm = 0x100000000 movl %ecx, %r12d imull %edx, %r12d leal (,%r12,4), %ebp movl %ebp, 20(%rsp) # 4-byte Spill movl %r8d, %r13d imull %r8d, %ebp leaq 24(%rsp), %rdx movl %ebp, %edi movl $512, %esi # imm = 0x200 callq _Z11h_block_adjiiPi movl 24(%rsp), %edi orq %r14, %rdi leaq 512(%r14), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_3 # %bb.2: leal (%rbx,%rbx), %eax leal (%r15,%r15), %ecx movq 208(%rsp), %rdx # 8-byte Reload movq %rdx, 88(%rsp) movq 120(%rsp), %rdx # 8-byte Reload movq %rdx, 48(%rsp) movl %ebx, 16(%rsp) movl %r15d, 12(%rsp) movl %r13d, 28(%rsp) movl %r12d, 116(%rsp) movl %eax, 112(%rsp) movl %ecx, 108(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 104(%rsp) movl %ebp, 100(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 160(%rsp) leaq 116(%rsp), %rax movq %rax, 168(%rsp) leaq 112(%rsp), %rax movq %rax, 176(%rsp) leaq 108(%rsp), %rax movq %rax, 184(%rsp) leaq 104(%rsp), %rax movq %rax, 192(%rsp) leaq 100(%rsp), %rax movq %rax, 200(%rsp) 
leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_3: callq hipDeviceSynchronize movl 24(%rsp), %edi movabsq $4294967296, %r13 # imm = 0x100000000 orq %r13, %rdi leaq 512(%r13), %r12 movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax movq 120(%rsp), %r14 # 8-byte Reload jne .LBB3_5 # %bb.4: leal (%rbx,%rbx), %eax addl %r15d, %r15d movq %r14, 88(%rsp) movl %eax, 32(%rsp) movl %r15d, 16(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 12(%rsp) movl %ebp, 28(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_2Piiiii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_5: callq hipDeviceSynchronize movl 24(%rsp), %edi orq %r13, %rdi movl $1, %r13d movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_7 # %bb.6: addl %ebx, %ebx movq %r14, 88(%rsp) movl %ebx, 32(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 16(%rsp) movl %ebp, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 
144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_3Piiii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_7: callq hipDeviceSynchronize .LBB3_8: movl %r13d, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z18h_make_interpolatePiS_iiii, .Lfunc_end3-_Z18h_make_interpolatePiS_iiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_2Piiiii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_3Piiii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, 
%r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z24d_make_interpolate_420_1PiS_iiiiiiii,@object # @_Z24d_make_interpolate_420_1PiS_iiiiiiii .section .rodata,"a",@progbits .globl _Z24d_make_interpolate_420_1PiS_iiiiiiii .p2align 3, 0x0 _Z24d_make_interpolate_420_1PiS_iiiiiiii: .quad _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .size _Z24d_make_interpolate_420_1PiS_iiiiiiii, 8 .type _Z24d_make_interpolate_420_2Piiiii,@object # @_Z24d_make_interpolate_420_2Piiiii .globl _Z24d_make_interpolate_420_2Piiiii .p2align 3, 0x0 _Z24d_make_interpolate_420_2Piiiii: .quad _Z39__device_stub__d_make_interpolate_420_2Piiiii .size _Z24d_make_interpolate_420_2Piiiii, 8 .type _Z24d_make_interpolate_420_3Piiii,@object # @_Z24d_make_interpolate_420_3Piiii .globl _Z24d_make_interpolate_420_3Piiii .p2align 3, 0x0 _Z24d_make_interpolate_420_3Piiii: .quad _Z39__device_stub__d_make_interpolate_420_3Piiii .size _Z24d_make_interpolate_420_3Piiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z24d_make_interpolate_420_1PiS_iiiiiiii" .size .L__unnamed_1, 41 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z24d_make_interpolate_420_2Piiiii" .size .L__unnamed_2, 35 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz 
"_Z24d_make_interpolate_420_3Piiii" .size .L__unnamed_3, 34 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .addrsig_sym _Z39__device_stub__d_make_interpolate_420_2Piiiii .addrsig_sym _Z39__device_stub__d_make_interpolate_420_3Piiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z24d_make_interpolate_420_1PiS_iiiiiiii .addrsig_sym _Z24d_make_interpolate_420_2Piiiii .addrsig_sym _Z24d_make_interpolate_420_3Piiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00075439_00000000-6_cs_interpolate.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii .type _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii, @function _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii: .LFB2082: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movq %rsp, %rax movq %rax, 136(%rsp) leaq 208(%rsp), %rax movq %rax, 144(%rsp) leaq 216(%rsp), %rax movq %rax, 152(%rsp) leaq 224(%rsp), %rax movq %rax, 160(%rsp) leaq 232(%rsp), %rax movq %rax, 168(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 184(%rsp), %rax subq %fs:40, %rax jne .L8 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 216 pushq 40(%rsp) .cfi_def_cfa_offset 224 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z24d_make_interpolate_420_1PiS_iiiiiiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, 
%rsp .cfi_def_cfa_offset 208 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii, .-_Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii .globl _Z24d_make_interpolate_420_1PiS_iiiiiiii .type _Z24d_make_interpolate_420_1PiS_iiiiiiii, @function _Z24d_make_interpolate_420_1PiS_iiiiiiii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 40 movl 40(%rsp), %eax pushq %rax .cfi_def_cfa_offset 48 call _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii addq $40, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z24d_make_interpolate_420_1PiS_iiiiiiii, .-_Z24d_make_interpolate_420_1PiS_iiiiiiii .globl _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii .type _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii, @function _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl %ecx, 12(%rsp) movl %r8d, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 12(%rsp), %rax movq %rax, 120(%rsp) leaq 8(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) 
.cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z24d_make_interpolate_420_2Piiiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii, .-_Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii .globl _Z24d_make_interpolate_420_2Piiiii .type _Z24d_make_interpolate_420_2Piiiii, @function _Z24d_make_interpolate_420_2Piiiii: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z24d_make_interpolate_420_2Piiiii, .-_Z24d_make_interpolate_420_2Piiiii .globl _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii .type _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii, @function _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii: .LFB2086: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl %ecx, 12(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 12(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 136(%rsp), %rax subq %fs:40, %rax jne .L24 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq 
_Z24d_make_interpolate_420_3Piiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii, .-_Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii .globl _Z24d_make_interpolate_420_3Piiii .type _Z24d_make_interpolate_420_3Piiii, @function _Z24d_make_interpolate_420_3Piiii: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z24d_make_interpolate_420_3Piiii, .-_Z24d_make_interpolate_420_3Piiii .globl _Z18h_make_interpolatePiS_iiii .type _Z18h_make_interpolatePiS_iiii, @function _Z18h_make_interpolatePiS_iiii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $72, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax cmpl $3, %r9d je .L35 .L27: movq 56(%rsp), %rdx subq %fs:40, %rdx jne .L36 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state movl %edx, %ebx movl %ecx, %ebp movl %r8d, %r12d movl %edx, %r15d imull %ecx, %r15d leal 0(,%r15,4), %r13d movl %r13d, %r14d imull %r8d, %r14d leaq 28(%rsp), %rdx movl $512, %esi movl %r14d, %edi call _Z11h_block_adjiiPi@PLT movl $512, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl 28(%rsp), %eax movl %eax, 32(%rsp) movl $1, 36(%rsp) 
movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L29: call cudaThreadSynchronize@PLT movl $512, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl 28(%rsp), %eax movl %eax, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L38 .L30: call cudaThreadSynchronize@PLT movl $512, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl 28(%rsp), %eax movl %eax, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L31: call cudaThreadSynchronize@PLT movl $1, %eax jmp .L27 .L37: pushq %r14 .cfi_def_cfa_offset 136 pushq %r13 .cfi_def_cfa_offset 144 leal (%rbp,%rbp), %eax pushq %rax .cfi_def_cfa_offset 152 leal (%rbx,%rbx), %eax pushq %rax .cfi_def_cfa_offset 160 movl %r15d, %r9d movl %r12d, %r8d movl %ebp, %ecx movl %ebx, %edx movq 32(%rsp), %rsi movq 40(%rsp), %rdi call _Z54__device_stub__Z24d_make_interpolate_420_1PiS_iiiiiiiiPiS_iiiiiiii addq $32, %rsp .cfi_def_cfa_offset 128 jmp .L29 .L38: leal (%rbp,%rbp), %edx leal (%rbx,%rbx), %esi movl %r14d, %r8d movl %r13d, %ecx movq (%rsp), %rdi call _Z48__device_stub__Z24d_make_interpolate_420_2PiiiiiPiiiii jmp .L30 .L39: leal (%rbx,%rbx), %esi movl %r14d, %ecx movl %r13d, %edx movq (%rsp), %rdi call _Z47__device_stub__Z24d_make_interpolate_420_3PiiiiPiiii jmp .L31 .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z18h_make_interpolatePiS_iiii, .-_Z18h_make_interpolatePiS_iiii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z24d_make_interpolate_420_3Piiii" .align 8 .LC1: .string "_Z24d_make_interpolate_420_2Piiiii" .align 8 .LC2: .string "_Z24d_make_interpolate_420_1PiS_iiiiiiii" .text 
.type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2089: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z24d_make_interpolate_420_3Piiii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z24d_make_interpolate_420_2Piiiii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z24d_make_interpolate_420_1PiS_iiiiiiii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cs_interpolate.hip" .globl _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii,@function _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii: # @_Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movl %r9d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 176(%rsp), %rax movq %rax, 128(%rsp) leaq 184(%rsp), %rax movq %rax, 136(%rsp) leaq 192(%rsp), %rax movq %rax, 144(%rsp) leaq 200(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end0: .size _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii, .Lfunc_end0-_Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .cfi_endproc # -- End function .globl _Z39__device_stub__d_make_interpolate_420_2Piiiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_2Piiiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_2Piiiii,@function _Z39__device_stub__d_make_interpolate_420_2Piiiii: # @_Z39__device_stub__d_make_interpolate_420_2Piiiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl 
%ecx, 12(%rsp) movl %r8d, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 12(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_2Piiiii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z39__device_stub__d_make_interpolate_420_2Piiiii, .Lfunc_end1-_Z39__device_stub__d_make_interpolate_420_2Piiiii .cfi_endproc # -- End function .globl _Z39__device_stub__d_make_interpolate_420_3Piiii # -- Begin function _Z39__device_stub__d_make_interpolate_420_3Piiii .p2align 4, 0x90 .type _Z39__device_stub__d_make_interpolate_420_3Piiii,@function _Z39__device_stub__d_make_interpolate_420_3Piiii: # @_Z39__device_stub__d_make_interpolate_420_3Piiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movl %ecx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 16(%rsp), %rax movq %rax, 96(%rsp) leaq 12(%rsp), %rax movq %rax, 104(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z24d_make_interpolate_420_3Piiii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z39__device_stub__d_make_interpolate_420_3Piiii, .Lfunc_end2-_Z39__device_stub__d_make_interpolate_420_3Piiii .cfi_endproc # -- End function .globl 
_Z18h_make_interpolatePiS_iiii # -- Begin function _Z18h_make_interpolatePiS_iiii .p2align 4, 0x90 .type _Z18h_make_interpolatePiS_iiii,@function _Z18h_make_interpolatePiS_iiii: # @_Z18h_make_interpolatePiS_iiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %r13d, %r13d cmpl $3, %r9d jne .LBB3_8 # %bb.1: movl %ecx, %r15d movl %edx, %ebx movq %rsi, 120(%rsp) # 8-byte Spill movq %rdi, 208(%rsp) # 8-byte Spill movabsq $4294967296, %r14 # imm = 0x100000000 movl %ecx, %r12d imull %edx, %r12d leal (,%r12,4), %ebp movl %ebp, 20(%rsp) # 4-byte Spill movl %r8d, %r13d imull %r8d, %ebp leaq 24(%rsp), %rdx movl %ebp, %edi movl $512, %esi # imm = 0x200 callq _Z11h_block_adjiiPi movl 24(%rsp), %edi orq %r14, %rdi leaq 512(%r14), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_3 # %bb.2: leal (%rbx,%rbx), %eax leal (%r15,%r15), %ecx movq 208(%rsp), %rdx # 8-byte Reload movq %rdx, 88(%rsp) movq 120(%rsp), %rdx # 8-byte Reload movq %rdx, 48(%rsp) movl %ebx, 16(%rsp) movl %r15d, 12(%rsp) movl %r13d, 28(%rsp) movl %r12d, 116(%rsp) movl %eax, 112(%rsp) movl %ecx, 108(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 104(%rsp) movl %ebp, 100(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 160(%rsp) leaq 116(%rsp), %rax movq %rax, 168(%rsp) leaq 112(%rsp), %rax movq %rax, 176(%rsp) leaq 108(%rsp), %rax movq %rax, 184(%rsp) leaq 104(%rsp), %rax movq %rax, 192(%rsp) leaq 100(%rsp), %rax movq %rax, 200(%rsp) 
leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_3: callq hipDeviceSynchronize movl 24(%rsp), %edi movabsq $4294967296, %r13 # imm = 0x100000000 orq %r13, %rdi leaq 512(%r13), %r12 movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax movq 120(%rsp), %r14 # 8-byte Reload jne .LBB3_5 # %bb.4: leal (%rbx,%rbx), %eax addl %r15d, %r15d movq %r14, 88(%rsp) movl %eax, 32(%rsp) movl %r15d, 16(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 12(%rsp) movl %ebp, 28(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_2Piiiii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_5: callq hipDeviceSynchronize movl 24(%rsp), %edi orq %r13, %rdi movl $1, %r13d movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_7 # %bb.6: addl %ebx, %ebx movq %r14, 88(%rsp) movl %ebx, 32(%rsp) movl 20(%rsp), %eax # 4-byte Reload movl %eax, 16(%rsp) movl %ebp, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 
144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z24d_make_interpolate_420_3Piiii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_7: callq hipDeviceSynchronize .LBB3_8: movl %r13d, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size _Z18h_make_interpolatePiS_iiii, .Lfunc_end3-_Z18h_make_interpolatePiS_iiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_1PiS_iiiiiiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_2Piiiii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z24d_make_interpolate_420_3Piiii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, 
%r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z24d_make_interpolate_420_1PiS_iiiiiiii,@object # @_Z24d_make_interpolate_420_1PiS_iiiiiiii .section .rodata,"a",@progbits .globl _Z24d_make_interpolate_420_1PiS_iiiiiiii .p2align 3, 0x0 _Z24d_make_interpolate_420_1PiS_iiiiiiii: .quad _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .size _Z24d_make_interpolate_420_1PiS_iiiiiiii, 8 .type _Z24d_make_interpolate_420_2Piiiii,@object # @_Z24d_make_interpolate_420_2Piiiii .globl _Z24d_make_interpolate_420_2Piiiii .p2align 3, 0x0 _Z24d_make_interpolate_420_2Piiiii: .quad _Z39__device_stub__d_make_interpolate_420_2Piiiii .size _Z24d_make_interpolate_420_2Piiiii, 8 .type _Z24d_make_interpolate_420_3Piiii,@object # @_Z24d_make_interpolate_420_3Piiii .globl _Z24d_make_interpolate_420_3Piiii .p2align 3, 0x0 _Z24d_make_interpolate_420_3Piiii: .quad _Z39__device_stub__d_make_interpolate_420_3Piiii .size _Z24d_make_interpolate_420_3Piiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z24d_make_interpolate_420_1PiS_iiiiiiii" .size .L__unnamed_1, 41 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z24d_make_interpolate_420_2Piiiii" .size .L__unnamed_2, 35 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz 
"_Z24d_make_interpolate_420_3Piiii" .size .L__unnamed_3, 34 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z39__device_stub__d_make_interpolate_420_1PiS_iiiiiiii .addrsig_sym _Z39__device_stub__d_make_interpolate_420_2Piiiii .addrsig_sym _Z39__device_stub__d_make_interpolate_420_3Piiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z24d_make_interpolate_420_1PiS_iiiiiiii .addrsig_sym _Z24d_make_interpolate_420_2Piiiii .addrsig_sym _Z24d_make_interpolate_420_3Piiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/*
 * Parallel Arnoldi iteration (see the Arnoldi algorithm, p.4 of the
 * accompanying paper).  Kernel names describe the step they perform.
 *
 * Conventions used throughout:
 *   - n      : problem dimension (length of the vectors q, z)
 *   - k      : number of Arnoldi steps; Q is n x (k+1), H is (k+1) x k
 *   - launch : <<<n/1024 + 1, 1024>>> gives one thread per vector element
 */
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>

/* Copy vector q into column j of Q (Q stored row-major with row stride k). */
__global__ void assign_Q(double* d_Q, double* d_q, int j, int k, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id<n)
    {
        d_Q[j+(id*k)] = d_q[id];
    }
}

/* Extract column i of Q (row stride col) into vector q. */
__global__ void update_q(double* d_q, double* d_Q, int i, int col, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id<n)
    {
        d_q[id] = d_Q[id*col + i];
    }
}

/* z -= (*temp) * q, with *temp broadcast through shared memory so every
 * thread in the block reads it once instead of hitting global memory. */
__global__ void update_z(double* d_z, double* d_q, double* temp, int n)
{
    __shared__ double t;
    if(threadIdx.x == 0) t = *temp;
    __syncthreads();
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id<n)
    {
        d_z[id] -= t * d_q[id];
    }
}

/* Single-thread dot product t = a . b, stored into H[i + j*(k+1)] and *temp.
 * NOTE: the loop variable deliberately shadows parameter i; after the loop,
 * i refers to the parameter again, so the H indexing below uses the caller's i.
 * NOTE(review): on the diagonal (i==j) the stored value is t-1, while *temp
 * keeps the full t used by update_z — presumably compensating for z containing
 * a copy of q at that point; confirm against the paper's recurrence. */
__global__ void dot_prod_assign_H(double* a, double* b, int n, double* temp,
                                  double* d_H, int i, int j, int k)
{
    double t=0;
    for(int i=0; i<n; i++)
        t += a[i]*b[i];
    if(i==j)
        d_H[i + j*(k+1)] = t-1;
    else
        d_H[i + j*(k+1)] = t;
    *temp = t;
}

/* Dense matrix-vector product: z = A*q, one thread per output row.
 * A is n x n, row-major; each thread walks its own row of A. */
__global__ void saxpy(double *A, int n, double* z, double* q)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id < n){
        double temp = 0;
        int i = id*n, j = i+n, count=-1;
        for(; i<j; i++)
            temp += A[i]*q[++count];
        z[id] = temp;
    }
}

/* Single-thread 2-norm: *temp = ||q||, also recorded at d_H[loc]. */
__global__ void normalize_1(double* q, double* temp, int n, double* d_H, int loc)
{
    double sum = 0;
    for(int i=0; i<n; i++)
    {
        double f = q[i];
        sum += f*f;
    }
    *temp = sqrt(sum);
    d_H[loc] = *temp;
}

/* Scale q by 1/(*temp) computed in normalize_1, one thread per element. */
__global__ void normalize_2(double* q, double* temp, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id<n){
        q[id] /= *temp;
    }
}

/* Host helper: compute ||q||, store it in H[loc], then normalize q in place.
 * Two launches because the norm must be complete before the division. */
void normalize_assign_H(double *q, double* temp, int n, double* d_H, int loc)
{
    normalize_1<<<1, 1>>>(q, temp, n, d_H, loc);
    normalize_2<<<n/1024 + 1, 1024>>>(q, temp, n);
}

/* Debug kernel: print a row x col matrix from a single device thread. */
__global__ void printMat(double* A, int row, int col)
{
    printf("Printing matrix\n");
    int z;
    for(z=0; z< (row*col); z++)
    {
        printf("%f ", A[z]);
        if((z+1)%col == 0)
            printf("\n");
    }
}

/*
 * Host driver for k steps of Arnoldi on the n x n matrix d_A, starting
 * from unit vector d_q.  Produces Q (n x (k+1)) and Hessenberg H
 * ((k+1) x k); d_z and temp are device scratch space.
 */
void parallelArnoldi(double* d_A, double* d_q, int k, double* d_Q,
                     double* d_H, int n, double* d_z, double* temp)
{
    assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, 0, k+1, n);
    for(int j=0; j<k; j++)
    {
        saxpy<<<n/1024 + 1, 1024>>>(d_A, n, d_z, d_q);      /* z = A*q        */
        for(int i=0; i<=j; i++)
        {
            update_q<<<n/1024 + 1, 1024>>>(d_q, d_Q, i, k+1, n);
            dot_prod_assign_H<<< 1, 1>>>(d_q, d_z, n, temp, d_H, i, j, k);
            update_z<<<n/1024 + 1, 1024>>>(d_z, d_q, temp, n);  /* orthogonalize */
        }
        normalize_assign_H(d_z, temp, n, d_H, j+1 + j*(k+1));
        cudaMemcpy(d_q, d_z, sizeof(double)*n, cudaMemcpyDeviceToDevice);
        assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, j+1, k+1, n);
    }
    /* BUG FIX: err was initialized to cudaSuccess and never updated, making
     * the check below dead code.  Pick up any sticky launch/runtime error. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error above: %s\n", cudaGetErrorString(err));
    /*
    printf("H\n");
    printMat<<<1, 1>>>(d_H, k+1, k);
    cudaDeviceSynchronize();
    printf("Q\n");
    printMat<<<1, 1>>>(d_Q, n, k+1);
    cudaDeviceSynchronize();
    */
}
.file "tmpxft_000fc265_00000000-6_p_arnoldi.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii .type _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii, @function _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii: .LFB2083: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8assign_QPdS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii, .-_Z32__device_stub__Z8assign_QPdS_iiiPdS_iii .globl _Z8assign_QPdS_iii .type _Z8assign_QPdS_iii, @function _Z8assign_QPdS_iii: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp 
.cfi_def_cfa_offset 16 call _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z8assign_QPdS_iii, .-_Z8assign_QPdS_iii .globl _Z32__device_stub__Z8update_qPdS_iiiPdS_iii .type _Z32__device_stub__Z8update_qPdS_iiiPdS_iii, @function _Z32__device_stub__Z8update_qPdS_iiiPdS_iii: .LFB2085: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8update_qPdS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z32__device_stub__Z8update_qPdS_iiiPdS_iii, .-_Z32__device_stub__Z8update_qPdS_iiiPdS_iii .globl _Z8update_qPdS_iii .type _Z8update_qPdS_iii, @function _Z8update_qPdS_iii: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z8update_qPdS_iiiPdS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z8update_qPdS_iii, .-_Z8update_qPdS_iii .globl _Z32__device_stub__Z8update_zPdS_S_iPdS_S_i .type 
_Z32__device_stub__Z8update_zPdS_S_iPdS_S_i, @function _Z32__device_stub__Z8update_zPdS_S_iPdS_S_i: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 136(%rsp), %rax subq %fs:40, %rax jne .L24 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8update_zPdS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z32__device_stub__Z8update_zPdS_S_iPdS_S_i, .-_Z32__device_stub__Z8update_zPdS_S_iPdS_S_i .globl _Z8update_zPdS_S_i .type _Z8update_zPdS_S_i, @function _Z8update_zPdS_S_i: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z8update_zPdS_S_iPdS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z8update_zPdS_S_i, .-_Z8update_zPdS_S_i .globl _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii .type _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii, @function _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii: .LFB2089: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movl %edx, 28(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) 
movl %r9d, 24(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 28(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) leaq 24(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 216(%rsp), %rax movq %rax, 168(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 184(%rsp), %rax subq %fs:40, %rax jne .L32 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z17dot_prod_assign_HPdS_iS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE2089: .size _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii, .-_Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii .globl _Z17dot_prod_assign_HPdS_iS_S_iii .type _Z17dot_prod_assign_HPdS_iS_S_iii, @function _Z17dot_prod_assign_HPdS_iS_S_iii: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _Z17dot_prod_assign_HPdS_iS_S_iii, .-_Z17dot_prod_assign_HPdS_iS_S_iii .globl _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_ .type _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_, @function _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_: .LFB2091: .cfi_startproc endbr64 subq $152, 
%rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L39 .L35: movq 136(%rsp), %rax subq %fs:40, %rax jne .L40 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L39: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z5saxpyPdiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L35 .L40: call __stack_chk_fail@PLT .cfi_endproc .LFE2091: .size _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_, .-_Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_ .globl _Z5saxpyPdiS_S_ .type _Z5saxpyPdiS_S_, @function _Z5saxpyPdiS_S_: .LFB2092: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _Z5saxpyPdiS_S_, .-_Z5saxpyPdiS_S_ .globl _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i .type _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i, @function _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i: .LFB2093: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %rcx, (%rsp) movl %r8d, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) leaq 
8(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L47 .L43: movq 136(%rsp), %rax subq %fs:40, %rax jne .L48 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11normalize_1PdS_iS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L43 .L48: call __stack_chk_fail@PLT .cfi_endproc .LFE2093: .size _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i, .-_Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i .globl _Z11normalize_1PdS_iS_i .type _Z11normalize_1PdS_iS_i, @function _Z11normalize_1PdS_iS_i: .LFB2094: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2094: .size _Z11normalize_1PdS_iS_i, .-_Z11normalize_1PdS_iS_i .globl _Z34__device_stub__Z11normalize_2PdS_iPdS_i .type _Z34__device_stub__Z11normalize_2PdS_iPdS_i, @function _Z34__device_stub__Z11normalize_2PdS_iPdS_i: .LFB2095: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movq 120(%rsp), %rax subq %fs:40, %rax jne .L56 addq 
$136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L55: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11normalize_2PdS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L51 .L56: call __stack_chk_fail@PLT .cfi_endproc .LFE2095: .size _Z34__device_stub__Z11normalize_2PdS_iPdS_i, .-_Z34__device_stub__Z11normalize_2PdS_iPdS_i .globl _Z11normalize_2PdS_i .type _Z11normalize_2PdS_i, @function _Z11normalize_2PdS_i: .LFB2096: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z34__device_stub__Z11normalize_2PdS_iPdS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2096: .size _Z11normalize_2PdS_i, .-_Z11normalize_2PdS_i .globl _Z18normalize_assign_HPdS_iS_i .type _Z18normalize_assign_HPdS_iS_i, @function _Z18normalize_assign_HPdS_iS_i: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $32, %rsp .cfi_def_cfa_offset 80 movq %rdi, %rbp movq %rsi, %r12 movl %edx, %ebx movq %rcx, %r13 movl %r8d, %r14d movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $1, 16(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L63 .L60: movl $1024, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) leal 1023(%rbx), %eax testl %ebx, %ebx cmovns %ebx, %eax sarl $10, %eax addl $1, %eax movl %eax, 8(%rsp) movl $1, 12(%rsp) movl $1, 16(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax 
je .L64 .L59: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L63: .cfi_restore_state movl %r14d, %r8d movq %r13, %rcx movl %ebx, %edx movq %r12, %rsi movq %rbp, %rdi call _Z37__device_stub__Z11normalize_1PdS_iS_iPdS_iS_i jmp .L60 .L64: movl %ebx, %edx movq %r12, %rsi movq %rbp, %rdi call _Z34__device_stub__Z11normalize_2PdS_iPdS_i jmp .L59 .cfi_endproc .LFE2057: .size _Z18normalize_assign_HPdS_iS_i, .-_Z18normalize_assign_HPdS_iS_i .globl _Z15parallelArnoldiPdS_iS_S_iS_S_ .type _Z15parallelArnoldiPdS_iS_S_iS_S_, @function _Z15parallelArnoldiPdS_iS_S_iS_S_: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rdi, 40(%rsp) movq %rsi, %r14 movl %edx, 16(%rsp) movq %rcx, 8(%rsp) movq %r8, (%rsp) movl %r9d, %r13d movq 144(%rsp), %r15 movl $1024, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) leal 1023(%r9), %ebp testl %r9d, %r9d cmovns %r9d, %ebp sarl $10, %ebp addl $1, %ebp movl %ebp, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L79 .L66: movl 16(%rsp), %ebx testl %ebx, %ebx jle .L65 movslq %r13d, %rax salq $3, %rax movq %rax, 24(%rsp) leal 2(%rbx), %eax movl %eax, 36(%rsp) movl $1, 32(%rsp) movl $0, %r12d leal 1(%rbx), %eax movl %eax, 20(%rsp) jmp .L75 .L79: movl 16(%rsp), %eax leal 1(%rax), %ecx movl %r13d, %r8d movl $0, %edx movq %r14, %rsi movq 8(%rsp), %rdi call 
_Z32__device_stub__Z8assign_QPdS_iiiPdS_iii jmp .L66 .L83: movq %r14, %rcx movq %r15, %rdx movl %r13d, %esi movq 40(%rsp), %rdi call _Z29__device_stub__Z5saxpyPdiS_S_PdiS_S_ jmp .L68 .L70: movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L80 .L71: movl $1024, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %ebp, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L81 .L72: addl $1, %ebx cmpl %ebx, %r12d jl .L69 .L73: movl $1024, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %ebp, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L70 movl %r13d, %r8d movl 20(%rsp), %ecx movl %ebx, %edx movq 8(%rsp), %rsi movq %r14, %rdi call _Z32__device_stub__Z8update_qPdS_iiiPdS_iii jmp .L70 .L80: movl 16(%rsp), %eax pushq %rax .cfi_def_cfa_offset 152 pushq %r12 .cfi_def_cfa_offset 160 movl %ebx, %r9d movq 16(%rsp), %r8 movq 168(%rsp), %rcx movl %r13d, %edx movq %r15, %rsi movq %r14, %rdi call _Z47__device_stub__Z17dot_prod_assign_HPdS_iS_S_iiiPdS_iS_S_iii addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L71 .L81: movl %r13d, %ecx movq 152(%rsp), %rdx movq %r14, %rsi movq %r15, %rdi call _Z32__device_stub__Z8update_zPdS_S_iPdS_S_i jmp .L72 .L69: addl $1, %r12d movl 32(%rsp), %r8d movq (%rsp), %rcx movl %r13d, %edx movq 152(%rsp), %rsi movq %r15, %rdi call _Z18normalize_assign_HPdS_iS_i movl $3, %ecx movq 24(%rsp), %rdx movq %r15, %rsi movq %r14, %rdi call cudaMemcpy@PLT movl $1024, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %ebp, 56(%rsp) movl $1, 60(%rsp) movl $1, 
64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L82 .L74: movl 36(%rsp), %ebx addl %ebx, 32(%rsp) movl 16(%rsp), %eax cmpl %eax, %r12d je .L65 .L75: movl $1024, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %ebp, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $0, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L83 .L68: testl %r12d, %r12d js .L69 movl $0, %ebx jmp .L73 .L82: movl %r13d, %r8d movl 20(%rsp), %ecx movl %r12d, %edx movq %r14, %rsi movq 8(%rsp), %rdi call _Z32__device_stub__Z8assign_QPdS_iiiPdS_iii jmp .L74 .L65: addq $88, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z15parallelArnoldiPdS_iS_S_iS_S_, .-_Z15parallelArnoldiPdS_iS_S_iS_S_ .globl _Z29__device_stub__Z8printMatPdiiPdii .type _Z29__device_stub__Z8printMatPdiiPdii, @function _Z29__device_stub__Z8printMatPdiiPdii: .LFB2097: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L88 .L84: movq 104(%rsp), %rax subq %fs:40, %rax jne .L89 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L88: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 
96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z8printMatPdii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L84 .L89: call __stack_chk_fail@PLT .cfi_endproc .LFE2097: .size _Z29__device_stub__Z8printMatPdiiPdii, .-_Z29__device_stub__Z8printMatPdiiPdii .globl _Z8printMatPdii .type _Z8printMatPdii, @function _Z8printMatPdii: .LFB2098: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z8printMatPdiiPdii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2098: .size _Z8printMatPdii, .-_Z8printMatPdii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z8printMatPdii" .LC1: .string "_Z11normalize_2PdS_i" .LC2: .string "_Z11normalize_1PdS_iS_i" .LC3: .string "_Z5saxpyPdiS_S_" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "_Z17dot_prod_assign_HPdS_iS_S_iii" .section .rodata.str1.1 .LC5: .string "_Z8update_zPdS_S_i" .LC6: .string "_Z8update_qPdS_iii" .LC7: .string "_Z8assign_QPdS_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2100: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z8printMatPdii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z11normalize_2PdS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 
.cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z11normalize_1PdS_iS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z5saxpyPdiS_S_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z17dot_prod_assign_HPdS_iS_S_iii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z8update_zPdS_S_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z8update_qPdS_iii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z8assign_QPdS_iii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq 
_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2100: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/*
 * The names of the kernels are fairly self explanatory
 * about the task that they are performing. You
 * may refer to the Arnoldi algorithm on page 4 of the
 * attached paper to understand the flow.
 */
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>

/* Write vector d_q (length n) into column j of d_Q.
 * d_Q is stored row-major with k columns: d_Q[row*k + j] = d_q[row]. */
__global__ void assign_Q(double* d_Q, double* d_q, int j, int k, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_Q[j + (id*k)] = d_q[id];
    }
}

/* Load column i of d_Q (row-major, 'col' columns) back into the
 * work vector d_q. */
__global__ void update_q(double* d_q, double* d_Q, int i, int col, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_q[id] = d_Q[id*col + i];
    }
}

/* z := z - (*temp) * q, elementwise over n entries.
 * The scalar *temp is staged through shared memory so each block
 * performs a single global-memory read of it. */
__global__ void update_z(double* d_z, double* d_q, double* temp, int n)
{
    __shared__ double t;
    if (threadIdx.x == 0)
        t = *temp;
    __syncthreads();
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_z[id] -= t * d_q[id];
    }
}

/* Single-thread kernel: t = dot(a, b) over n elements; stores t into
 * d_H[i + j*(k+1)] — minus 1 when i == j — and also into *temp.
 * NOTE: the loop counter shadows the parameter 'i' inside the loop
 * only; the 'i == j' test below refers to the parameter. */
__global__ void dot_prod_assign_H(double* a, double* b, int n, double* temp, double* d_H, int i, int j, int k)
{
    double t = 0;
    for (int i = 0; i < n; i++)
        t += a[i]*b[i];
    if (i == j)
        d_H[i + j*(k+1)] = t - 1;
    else
        d_H[i + j*(k+1)] = t;
    *temp = t;
}

/* Despite the name, this is a dense matrix-vector product:
 * z = A * q, where A is n x n row-major. One thread per output row. */
__global__ void saxpy(double *A, int n, double* z, double* q)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        double temp = 0;
        int i = id*n, j = i + n, count = -1;
        for (; i < j; i++)
            temp += A[i]*q[++count];
        z[id] = temp;
    }
}

/* Single-thread kernel: *temp = ||q||_2 over n elements, and the same
 * norm is recorded at d_H[loc]. */
__global__ void normalize_1(double* q, double* temp, int n, double* d_H, int loc)
{
    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        double f = q[i];
        sum += f*f;
    }
    *temp = sqrt(sum);
    d_H[loc] = *temp;
}

/* q[id] /= *temp for each of the n entries (divide by the norm that
 * normalize_1 just computed). */
__global__ void normalize_2(double* q, double* temp, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        q[id] /= *temp;
    }
}

/* Host helper: compute the 2-norm of q, store it in d_H[loc], then
 * normalize q in place. The two kernels are sequenced on the default
 * stream, so normalize_2 observes the norm written by normalize_1. */
void normalize_assign_H(double *q, double* temp, int n, double* d_H, int loc)
{
    normalize_1<<<1, 1>>>(q, temp, n, d_H, loc);
    normalize_2<<<n/1024 + 1, 1024>>>(q, temp, n);
}

/* Debug kernel: print a row x col matrix from a single device thread. */
__global__ void printMat(double* A, int row, int col)
{
    printf("Printing matrix\n");
    int z;
    for (z = 0; z < (row*col); z++)
    {
        printf("%f ", A[z]);
        if ((z+1) % col == 0)
            printf("\n");
    }
}

/* Host driver for k steps of the Arnoldi iteration.
 * d_A: n x n input matrix; d_q: current basis vector; d_Q: n x (k+1)
 * basis matrix; d_H: (k+1) x k Hessenberg matrix; d_z: work vector;
 * temp: device scalar scratch. All pointers are device memory. */
void parallelArnoldi(double* d_A, double* d_q, int k, double* d_Q, double* d_H, int n, double* d_z, double* temp)
{
    cudaError_t err = cudaSuccess;
    assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, 0, k+1, n);
    for (int j = 0; j < k; j++)
    {
        saxpy<<<n/1024 + 1, 1024>>>(d_A, n, d_z, d_q);       /* z = A*q */
        for (int i = 0; i <= j; i++)
        {
            update_q<<<n/1024 + 1, 1024>>>(d_q, d_Q, i, k+1, n);
            dot_prod_assign_H<<< 1, 1>>>(d_q, d_z, n, temp, d_H, i, j, k);
            update_z<<<n/1024 + 1, 1024>>>(d_z, d_q, temp, n);
        }
        normalize_assign_H(d_z, temp, n, d_H, j+1 + j*(k+1));
        cudaMemcpy(d_q, d_z, sizeof(double)*n, cudaMemcpyDeviceToDevice);
        assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, j+1, k+1, n);
    }
    /* BUG FIX: 'err' was initialized to cudaSuccess and never updated,
     * so the check below could never fire. Pick up any launch/runtime
     * error from the work above. */
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error above: %s\n", cudaGetErrorString(err));
    /*
    printf("H\n");
    printMat<<<1, 1>>>(d_H, k+1, k);
    cudaDeviceSynchronize();
    printf("Q\n");
    printMat<<<1, 1>>>(d_Q, n, k+1);
    cudaDeviceSynchronize();
    */
}
/*
 * The names of the kernels are fairly self explanatory
 * about the task that they are performing. You
 * may refer to the Arnoldi algorithm on page 4 of the
 * attached paper to understand the flow.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>

/* Write vector d_q (length n) into column j of d_Q.
 * d_Q is stored row-major with k columns: d_Q[row*k + j] = d_q[row]. */
__global__ void assign_Q(double* d_Q, double* d_q, int j, int k, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_Q[j + (id*k)] = d_q[id];
    }
}

/* Load column i of d_Q (row-major, 'col' columns) back into the
 * work vector d_q. */
__global__ void update_q(double* d_q, double* d_Q, int i, int col, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_q[id] = d_Q[id*col + i];
    }
}

/* z := z - (*temp) * q, elementwise over n entries.
 * The scalar *temp is staged through shared memory so each block
 * performs a single global-memory read of it. */
__global__ void update_z(double* d_z, double* d_q, double* temp, int n)
{
    __shared__ double t;
    if (threadIdx.x == 0)
        t = *temp;
    __syncthreads();
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_z[id] -= t * d_q[id];
    }
}

/* Single-thread kernel: t = dot(a, b) over n elements; stores t into
 * d_H[i + j*(k+1)] — minus 1 when i == j — and also into *temp.
 * NOTE: the loop counter shadows the parameter 'i' inside the loop
 * only; the 'i == j' test below refers to the parameter. */
__global__ void dot_prod_assign_H(double* a, double* b, int n, double* temp, double* d_H, int i, int j, int k)
{
    double t = 0;
    for (int i = 0; i < n; i++)
        t += a[i]*b[i];
    if (i == j)
        d_H[i + j*(k+1)] = t - 1;
    else
        d_H[i + j*(k+1)] = t;
    *temp = t;
}

/* Despite the name, this is a dense matrix-vector product:
 * z = A * q, where A is n x n row-major. One thread per output row. */
__global__ void saxpy(double *A, int n, double* z, double* q)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        double temp = 0;
        int i = id*n, j = i + n, count = -1;
        for (; i < j; i++)
            temp += A[i]*q[++count];
        z[id] = temp;
    }
}

/* Single-thread kernel: *temp = ||q||_2 over n elements, and the same
 * norm is recorded at d_H[loc]. */
__global__ void normalize_1(double* q, double* temp, int n, double* d_H, int loc)
{
    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        double f = q[i];
        sum += f*f;
    }
    *temp = sqrt(sum);
    d_H[loc] = *temp;
}

/* q[id] /= *temp for each of the n entries (divide by the norm that
 * normalize_1 just computed). */
__global__ void normalize_2(double* q, double* temp, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        q[id] /= *temp;
    }
}

/* Host helper: compute the 2-norm of q, store it in d_H[loc], then
 * normalize q in place. The two kernels are sequenced on the default
 * stream, so normalize_2 observes the norm written by normalize_1. */
void normalize_assign_H(double *q, double* temp, int n, double* d_H, int loc)
{
    normalize_1<<<1, 1>>>(q, temp, n, d_H, loc);
    normalize_2<<<n/1024 + 1, 1024>>>(q, temp, n);
}

/* Debug kernel: print a row x col matrix from a single device thread. */
__global__ void printMat(double* A, int row, int col)
{
    printf("Printing matrix\n");
    int z;
    for (z = 0; z < (row*col); z++)
    {
        printf("%f ", A[z]);
        if ((z+1) % col == 0)
            printf("\n");
    }
}

/* Host driver for k steps of the Arnoldi iteration.
 * d_A: n x n input matrix; d_q: current basis vector; d_Q: n x (k+1)
 * basis matrix; d_H: (k+1) x k Hessenberg matrix; d_z: work vector;
 * temp: device scalar scratch. All pointers are device memory. */
void parallelArnoldi(double* d_A, double* d_q, int k, double* d_Q, double* d_H, int n, double* d_z, double* temp)
{
    hipError_t err = hipSuccess;
    assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, 0, k+1, n);
    for (int j = 0; j < k; j++)
    {
        saxpy<<<n/1024 + 1, 1024>>>(d_A, n, d_z, d_q);       /* z = A*q */
        for (int i = 0; i <= j; i++)
        {
            update_q<<<n/1024 + 1, 1024>>>(d_q, d_Q, i, k+1, n);
            dot_prod_assign_H<<< 1, 1>>>(d_q, d_z, n, temp, d_H, i, j, k);
            update_z<<<n/1024 + 1, 1024>>>(d_z, d_q, temp, n);
        }
        normalize_assign_H(d_z, temp, n, d_H, j+1 + j*(k+1));
        hipMemcpy(d_q, d_z, sizeof(double)*n, hipMemcpyDeviceToDevice);
        assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, j+1, k+1, n);
    }
    /* BUG FIX: 'err' was initialized to hipSuccess and never updated,
     * so the check below could never fire. Pick up any launch/runtime
     * error from the work above. */
    err = hipGetLastError();
    if (err != hipSuccess)
        printf("Error above: %s\n", hipGetErrorString(err));
    /*
    printf("H\n");
    printMat<<<1, 1>>>(d_H, k+1, k);
    hipDeviceSynchronize();
    printf("Q\n");
    printMat<<<1, 1>>>(d_Q, n, k+1);
    hipDeviceSynchronize();
    */
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/*
 * The names of the kernels are fairly self explanatory
 * about the task that they are performing. You
 * may refer to the Arnoldi algorithm on page 4 of the
 * attached paper to understand the flow.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>

/* Write vector d_q (length n) into column j of d_Q.
 * d_Q is stored row-major with k columns: d_Q[row*k + j] = d_q[row]. */
__global__ void assign_Q(double* d_Q, double* d_q, int j, int k, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_Q[j + (id*k)] = d_q[id];
    }
}

/* Load column i of d_Q (row-major, 'col' columns) back into the
 * work vector d_q. */
__global__ void update_q(double* d_q, double* d_Q, int i, int col, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_q[id] = d_Q[id*col + i];
    }
}

/* z := z - (*temp) * q, elementwise over n entries.
 * The scalar *temp is staged through shared memory so each block
 * performs a single global-memory read of it. */
__global__ void update_z(double* d_z, double* d_q, double* temp, int n)
{
    __shared__ double t;
    if (threadIdx.x == 0)
        t = *temp;
    __syncthreads();
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n)
    {
        d_z[id] -= t * d_q[id];
    }
}

/* Single-thread kernel: t = dot(a, b) over n elements; stores t into
 * d_H[i + j*(k+1)] — minus 1 when i == j — and also into *temp.
 * NOTE: the loop counter shadows the parameter 'i' inside the loop
 * only; the 'i == j' test below refers to the parameter. */
__global__ void dot_prod_assign_H(double* a, double* b, int n, double* temp, double* d_H, int i, int j, int k)
{
    double t = 0;
    for (int i = 0; i < n; i++)
        t += a[i]*b[i];
    if (i == j)
        d_H[i + j*(k+1)] = t - 1;
    else
        d_H[i + j*(k+1)] = t;
    *temp = t;
}

/* Despite the name, this is a dense matrix-vector product:
 * z = A * q, where A is n x n row-major. One thread per output row. */
__global__ void saxpy(double *A, int n, double* z, double* q)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        double temp = 0;
        int i = id*n, j = i + n, count = -1;
        for (; i < j; i++)
            temp += A[i]*q[++count];
        z[id] = temp;
    }
}

/* Single-thread kernel: *temp = ||q||_2 over n elements, and the same
 * norm is recorded at d_H[loc]. */
__global__ void normalize_1(double* q, double* temp, int n, double* d_H, int loc)
{
    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        double f = q[i];
        sum += f*f;
    }
    *temp = sqrt(sum);
    d_H[loc] = *temp;
}

/* q[id] /= *temp for each of the n entries (divide by the norm that
 * normalize_1 just computed). */
__global__ void normalize_2(double* q, double* temp, int n)
{
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (id < n) {
        q[id] /= *temp;
    }
}

/* Host helper: compute the 2-norm of q, store it in d_H[loc], then
 * normalize q in place. The two kernels are sequenced on the default
 * stream, so normalize_2 observes the norm written by normalize_1. */
void normalize_assign_H(double *q, double* temp, int n, double* d_H, int loc)
{
    normalize_1<<<1, 1>>>(q, temp, n, d_H, loc);
    normalize_2<<<n/1024 + 1, 1024>>>(q, temp, n);
}

/* Debug kernel: print a row x col matrix from a single device thread. */
__global__ void printMat(double* A, int row, int col)
{
    printf("Printing matrix\n");
    int z;
    for (z = 0; z < (row*col); z++)
    {
        printf("%f ", A[z]);
        if ((z+1) % col == 0)
            printf("\n");
    }
}

/* Host driver for k steps of the Arnoldi iteration.
 * d_A: n x n input matrix; d_q: current basis vector; d_Q: n x (k+1)
 * basis matrix; d_H: (k+1) x k Hessenberg matrix; d_z: work vector;
 * temp: device scalar scratch. All pointers are device memory. */
void parallelArnoldi(double* d_A, double* d_q, int k, double* d_Q, double* d_H, int n, double* d_z, double* temp)
{
    hipError_t err = hipSuccess;
    assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, 0, k+1, n);
    for (int j = 0; j < k; j++)
    {
        saxpy<<<n/1024 + 1, 1024>>>(d_A, n, d_z, d_q);       /* z = A*q */
        for (int i = 0; i <= j; i++)
        {
            update_q<<<n/1024 + 1, 1024>>>(d_q, d_Q, i, k+1, n);
            dot_prod_assign_H<<< 1, 1>>>(d_q, d_z, n, temp, d_H, i, j, k);
            update_z<<<n/1024 + 1, 1024>>>(d_z, d_q, temp, n);
        }
        normalize_assign_H(d_z, temp, n, d_H, j+1 + j*(k+1));
        hipMemcpy(d_q, d_z, sizeof(double)*n, hipMemcpyDeviceToDevice);
        assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, j+1, k+1, n);
    }
    /* BUG FIX: 'err' was initialized to hipSuccess and never updated,
     * so the check below could never fire. Pick up any launch/runtime
     * error from the work above. */
    err = hipGetLastError();
    if (err != hipSuccess)
        printf("Error above: %s\n", hipGetErrorString(err));
    /*
    printf("H\n");
    printMat<<<1, 1>>>(d_H, k+1, k);
    hipDeviceSynchronize();
    printf("Q\n");
    printMat<<<1, 1>>>(d_Q, n, k+1);
    hipDeviceSynchronize();
    */
}
.text .file "p_arnoldi.hip" .globl _Z23__device_stub__assign_QPdS_iii # -- Begin function _Z23__device_stub__assign_QPdS_iii .p2align 4, 0x90 .type _Z23__device_stub__assign_QPdS_iii,@function _Z23__device_stub__assign_QPdS_iii: # @_Z23__device_stub__assign_QPdS_iii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8assign_QPdS_iii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z23__device_stub__assign_QPdS_iii, .Lfunc_end0-_Z23__device_stub__assign_QPdS_iii .cfi_endproc # -- End function .globl _Z23__device_stub__update_qPdS_iii # -- Begin function _Z23__device_stub__update_qPdS_iii .p2align 4, 0x90 .type _Z23__device_stub__update_qPdS_iii,@function _Z23__device_stub__update_qPdS_iii: # @_Z23__device_stub__update_qPdS_iii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8update_qPdS_iii, %edi 
pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z23__device_stub__update_qPdS_iii, .Lfunc_end1-_Z23__device_stub__update_qPdS_iii .cfi_endproc # -- End function .globl _Z23__device_stub__update_zPdS_S_i # -- Begin function _Z23__device_stub__update_zPdS_S_i .p2align 4, 0x90 .type _Z23__device_stub__update_zPdS_S_i,@function _Z23__device_stub__update_zPdS_S_i: # @_Z23__device_stub__update_zPdS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8update_zPdS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z23__device_stub__update_zPdS_S_i, .Lfunc_end2-_Z23__device_stub__update_zPdS_S_i .cfi_endproc # -- End function .globl _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii # -- Begin function _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii .p2align 4, 0x90 .type _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii,@function _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii: # @_Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii .cfi_startproc # %bb.0: subq $168, %rsp .cfi_def_cfa_offset 176 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movl %edx, 12(%rsp) movq %rcx, 72(%rsp) movq %r8, 64(%rsp) movl %r9d, 8(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 72(%rsp), %rax movq %rax, 
120(%rsp) leaq 64(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 176(%rsp), %rax movq %rax, 144(%rsp) leaq 184(%rsp), %rax movq %rax, 152(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z17dot_prod_assign_HPdS_iS_S_iii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $184, %rsp .cfi_adjust_cfa_offset -184 retq .Lfunc_end3: .size _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii, .Lfunc_end3-_Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii .cfi_endproc # -- End function .globl _Z20__device_stub__saxpyPdiS_S_ # -- Begin function _Z20__device_stub__saxpyPdiS_S_ .p2align 4, 0x90 .type _Z20__device_stub__saxpyPdiS_S_,@function _Z20__device_stub__saxpyPdiS_S_: # @_Z20__device_stub__saxpyPdiS_S_ .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 4(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z5saxpyPdiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z20__device_stub__saxpyPdiS_S_, .Lfunc_end4-_Z20__device_stub__saxpyPdiS_S_ .cfi_endproc # -- End function .globl _Z26__device_stub__normalize_1PdS_iS_i # -- Begin function _Z26__device_stub__normalize_1PdS_iS_i .p2align 4, 0x90 .type _Z26__device_stub__normalize_1PdS_iS_i,@function 
_Z26__device_stub__normalize_1PdS_iS_i: # @_Z26__device_stub__normalize_1PdS_iS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 4(%rsp) movq %rcx, 56(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11normalize_1PdS_iS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _Z26__device_stub__normalize_1PdS_iS_i, .Lfunc_end5-_Z26__device_stub__normalize_1PdS_iS_i .cfi_endproc # -- End function .globl _Z26__device_stub__normalize_2PdS_i # -- Begin function _Z26__device_stub__normalize_2PdS_i .p2align 4, 0x90 .type _Z26__device_stub__normalize_2PdS_i,@function _Z26__device_stub__normalize_2PdS_i: # @_Z26__device_stub__normalize_2PdS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11normalize_2PdS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end6: .size _Z26__device_stub__normalize_2PdS_i, .Lfunc_end6-_Z26__device_stub__normalize_2PdS_i .cfi_endproc # -- End function 
.globl _Z18normalize_assign_HPdS_iS_i # -- Begin function _Z18normalize_assign_HPdS_iS_i .p2align 4, 0x90 .type _Z18normalize_assign_HPdS_iS_i,@function _Z18normalize_assign_HPdS_iS_i: # @_Z18normalize_assign_HPdS_iS_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $120, %rsp .cfi_def_cfa_offset 176 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %r8d, %ebp movq %rcx, %r13 movl %edx, %ebx movq %rsi, %r14 movq %rdi, %r15 movabsq $4294967297, %r12 # imm = 0x100000001 movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_2 # %bb.1: movq %r15, 64(%rsp) movq %r14, 56(%rsp) movl %ebx, 76(%rsp) movq %r13, 16(%rsp) movl %ebp, 72(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 56(%rsp), %rax movq %rax, 88(%rsp) leaq 76(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11normalize_1PdS_iS_i, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_2: leal 1023(%rbx), %eax testl %ebx, %ebx cmovnsl %ebx, %eax sarl $10, %eax incl %eax leaq (%r12,%rax), %rdi decq %rdi addq $1023, %r12 # imm = 0x3FF movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_4 # %bb.3: movq %r15, 64(%rsp) movq %r14, 56(%rsp) movl %ebx, (%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 
56(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11normalize_2PdS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_4: addq $120, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size _Z18normalize_assign_HPdS_iS_i, .Lfunc_end7-_Z18normalize_assign_HPdS_iS_i .cfi_endproc # -- End function .globl _Z23__device_stub__printMatPdii # -- Begin function _Z23__device_stub__printMatPdii .p2align 4, 0x90 .type _Z23__device_stub__printMatPdii,@function _Z23__device_stub__printMatPdii: # @_Z23__device_stub__printMatPdii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z8printMatPdii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end8: .size _Z23__device_stub__printMatPdii, .Lfunc_end8-_Z23__device_stub__printMatPdii .cfi_endproc # -- End function .globl _Z15parallelArnoldiPdS_iS_S_iS_S_ # -- Begin function _Z15parallelArnoldiPdS_iS_S_iS_S_ .p2align 4, 0x90 .type _Z15parallelArnoldiPdS_iS_S_iS_S_,@function 
_Z15parallelArnoldiPdS_iS_S_iS_S_: # @_Z15parallelArnoldiPdS_iS_S_iS_S_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $232, %rsp .cfi_def_cfa_offset 288 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %r9d, %ebx movq %r8, 208(%rsp) # 8-byte Spill movq %rcx, 168(%rsp) # 8-byte Spill movl %edx, %ebp movq %rsi, %r13 movq %rdi, 216(%rsp) # 8-byte Spill leal 1023(%rbx), %eax testl %r9d, %r9d cmovnsl %r9d, %eax movabsq $4294968320, %rdx # imm = 0x100000400 sarl $10, %eax incl %eax leaq (%rax,%rdx), %r15 addq $-1024, %r15 # imm = 0xFC00 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_2 # %bb.1: leal 1(%rbp), %eax movq 168(%rsp), %rcx # 8-byte Reload movq %rcx, 80(%rsp) movq %r13, 72(%rsp) movl $0, 16(%rsp) movl %eax, 8(%rsp) movl %ebx, 4(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 72(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z8assign_QPdS_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB9_2: testl %ebp, %ebp jle .LBB9_16 # %bb.3: # %.lr.ph leal 1(%rbp), %eax movl %eax, 92(%rsp) # 4-byte Spill movslq %ebx, %rax shlq $3, %rax movq %rax, 224(%rsp) # 8-byte Spill xorl %eax, %eax movq %rax, 176(%rsp) # 8-byte Spill movl $1, %r14d movabsq $4294968320, %rax # imm = 0x100000400 leaq 
-1023(%rax), %r12 movq %rbp, 200(%rsp) # 8-byte Spill jmp .LBB9_4 .p2align 4, 0x90 .LBB9_13: # in Loop: Header=BB9_4 Depth=1 movq 176(%rsp), %rcx # 8-byte Reload leal 1(%rcx), %eax movl %eax, 184(%rsp) # 4-byte Spill movl %ecx, %eax imull 92(%rsp), %eax # 4-byte Folded Reload leal (%rcx,%rax), %r8d incl %r8d movq 288(%rsp), %rbp movq %rbp, %rdi movq 296(%rsp), %rsi movl %ebx, %edx movq 208(%rsp), %rcx # 8-byte Reload callq _Z18normalize_assign_HPdS_iS_i movq %r13, %rdi movq %rbp, %rsi movl 184(%rsp), %ebp # 4-byte Reload movq 224(%rsp), %rdx # 8-byte Reload movl $3, %ecx callq hipMemcpy movq %r15, %rdi movl $1, %esi movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax je .LBB9_14 .LBB9_15: # in Loop: Header=BB9_4 Depth=1 incl %r14d movl %ebp, %eax movq %rax, 176(%rsp) # 8-byte Spill movq 200(%rsp), %rax # 8-byte Reload cmpl %eax, %ebp je .LBB9_16 .LBB9_4: # =>This Loop Header: Depth=1 # Child Loop BB9_6 Depth 2 movq %r15, %rdi movl $1, %esi movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_5 # %bb.17: # in Loop: Header=BB9_4 Depth=1 movq 216(%rsp), %rax # 8-byte Reload movq %rax, 80(%rsp) movl %ebx, 8(%rsp) movq 288(%rsp), %rax movq %rax, 72(%rsp) movq %r13, 32(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z5saxpyPdiS_S_, %edi leaq 96(%rsp), %r9 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB9_5: # %.preheader # in Loop: Header=BB9_4 Depth=1 xorl %ebp, %ebp jmp 
.LBB9_6 .p2align 4, 0x90 .LBB9_12: # in Loop: Header=BB9_6 Depth=2 incl %ebp cmpl %ebp, %r14d je .LBB9_13 .LBB9_6: # Parent Loop BB9_4 Depth=1 # => This Inner Loop Header: Depth=2 movq %r15, %rdi movl $1, %esi movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_8 # %bb.7: # in Loop: Header=BB9_6 Depth=2 movq %r13, 80(%rsp) movq 168(%rsp), %rax # 8-byte Reload movq %rax, 72(%rsp) movl %ebp, 16(%rsp) movl 92(%rsp), %eax # 4-byte Reload movl %eax, 8(%rsp) movl %ebx, 4(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 72(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z8update_qPdS_iii, %edi leaq 96(%rsp), %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB9_8: # in Loop: Header=BB9_6 Depth=2 movq %r12, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_10 # %bb.9: # in Loop: Header=BB9_6 Depth=2 movq %r13, 80(%rsp) movq 288(%rsp), %rax movq %rax, 72(%rsp) movl %ebx, 4(%rsp) movq 296(%rsp), %rax movq %rax, 32(%rsp) movq 208(%rsp), %rax # 8-byte Reload movq %rax, 24(%rsp) movl %ebp, 196(%rsp) movq 176(%rsp), %rax # 8-byte Reload movl %eax, 192(%rsp) movq 200(%rsp), %rax # 8-byte Reload movl %eax, 188(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 72(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 196(%rsp), %rax movq %rax, 136(%rsp) leaq 192(%rsp), %rax movq %rax, 
144(%rsp) leaq 188(%rsp), %rax movq %rax, 152(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z17dot_prod_assign_HPdS_iS_S_iii, %edi leaq 96(%rsp), %r9 pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB9_10: # in Loop: Header=BB9_6 Depth=2 movq %r15, %rdi movl $1, %esi movabsq $4294968320, %rdx # imm = 0x100000400 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_12 # %bb.11: # in Loop: Header=BB9_6 Depth=2 movq 288(%rsp), %rax movq %rax, 80(%rsp) movq %r13, 72(%rsp) movq 296(%rsp), %rax movq %rax, 32(%rsp) movl %ebx, 8(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 72(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z8update_zPdS_S_i, %edi leaq 96(%rsp), %r9 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB9_12 .p2align 4, 0x90 .LBB9_14: # in Loop: Header=BB9_4 Depth=1 movq 168(%rsp), %rax # 8-byte Reload movq %rax, 80(%rsp) movq %r13, 72(%rsp) movl %ebp, 16(%rsp) movl 92(%rsp), %eax # 4-byte Reload movl %eax, 8(%rsp) movl %ebx, 4(%rsp) leaq 80(%rsp), %rax movq %rax, 96(%rsp) leaq 72(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 
40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z8assign_QPdS_iii, %edi leaq 96(%rsp), %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB9_15 .LBB9_16: # %._crit_edge addq $232, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end9: .size _Z15parallelArnoldiPdS_iS_S_iS_S_, .Lfunc_end9-_Z15parallelArnoldiPdS_iS_S_iS_S_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB10_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB10_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8assign_QPdS_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8update_qPdS_iii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8update_zPdS_S_i, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17dot_prod_assign_HPdS_iS_S_iii, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps 
%xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5saxpyPdiS_S_, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11normalize_1PdS_iS_i, %esi movl $.L__unnamed_6, %edx movl $.L__unnamed_6, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11normalize_2PdS_i, %esi movl $.L__unnamed_7, %edx movl $.L__unnamed_7, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8printMatPdii, %esi movl $.L__unnamed_8, %edx movl $.L__unnamed_8, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end10: .size __hip_module_ctor, .Lfunc_end10-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB11_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB11_2: retq .Lfunc_end11: .size __hip_module_dtor, .Lfunc_end11-__hip_module_dtor .cfi_endproc # -- End function .type _Z8assign_QPdS_iii,@object # @_Z8assign_QPdS_iii .section .rodata,"a",@progbits .globl _Z8assign_QPdS_iii .p2align 3, 0x0 _Z8assign_QPdS_iii: .quad _Z23__device_stub__assign_QPdS_iii .size _Z8assign_QPdS_iii, 8 .type _Z8update_qPdS_iii,@object # @_Z8update_qPdS_iii .globl _Z8update_qPdS_iii .p2align 3, 0x0 _Z8update_qPdS_iii: .quad _Z23__device_stub__update_qPdS_iii .size _Z8update_qPdS_iii, 8 .type 
_Z8update_zPdS_S_i,@object # @_Z8update_zPdS_S_i .globl _Z8update_zPdS_S_i .p2align 3, 0x0 _Z8update_zPdS_S_i: .quad _Z23__device_stub__update_zPdS_S_i .size _Z8update_zPdS_S_i, 8 .type _Z17dot_prod_assign_HPdS_iS_S_iii,@object # @_Z17dot_prod_assign_HPdS_iS_S_iii .globl _Z17dot_prod_assign_HPdS_iS_S_iii .p2align 3, 0x0 _Z17dot_prod_assign_HPdS_iS_S_iii: .quad _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii .size _Z17dot_prod_assign_HPdS_iS_S_iii, 8 .type _Z5saxpyPdiS_S_,@object # @_Z5saxpyPdiS_S_ .globl _Z5saxpyPdiS_S_ .p2align 3, 0x0 _Z5saxpyPdiS_S_: .quad _Z20__device_stub__saxpyPdiS_S_ .size _Z5saxpyPdiS_S_, 8 .type _Z11normalize_1PdS_iS_i,@object # @_Z11normalize_1PdS_iS_i .globl _Z11normalize_1PdS_iS_i .p2align 3, 0x0 _Z11normalize_1PdS_iS_i: .quad _Z26__device_stub__normalize_1PdS_iS_i .size _Z11normalize_1PdS_iS_i, 8 .type _Z11normalize_2PdS_i,@object # @_Z11normalize_2PdS_i .globl _Z11normalize_2PdS_i .p2align 3, 0x0 _Z11normalize_2PdS_i: .quad _Z26__device_stub__normalize_2PdS_i .size _Z11normalize_2PdS_i, 8 .type _Z8printMatPdii,@object # @_Z8printMatPdii .globl _Z8printMatPdii .p2align 3, 0x0 _Z8printMatPdii: .quad _Z23__device_stub__printMatPdii .size _Z8printMatPdii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z8assign_QPdS_iii" .size .L__unnamed_1, 19 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z8update_qPdS_iii" .size .L__unnamed_2, 19 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8update_zPdS_S_i" .size .L__unnamed_3, 19 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z17dot_prod_assign_HPdS_iS_S_iii" .size .L__unnamed_4, 34 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "_Z5saxpyPdiS_S_" .size .L__unnamed_5, 16 .type .L__unnamed_6,@object # @5 .L__unnamed_6: .asciz "_Z11normalize_1PdS_iS_i" .size .L__unnamed_6, 24 .type .L__unnamed_7,@object # @6 .L__unnamed_7: .asciz "_Z11normalize_2PdS_i" .size .L__unnamed_7, 21 .type .L__unnamed_8,@object # @7 
.L__unnamed_8: .asciz "_Z8printMatPdii" .size .L__unnamed_8, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__assign_QPdS_iii .addrsig_sym _Z23__device_stub__update_qPdS_iii .addrsig_sym _Z23__device_stub__update_zPdS_S_i .addrsig_sym _Z32__device_stub__dot_prod_assign_HPdS_iS_S_iii .addrsig_sym _Z20__device_stub__saxpyPdiS_S_ .addrsig_sym _Z26__device_stub__normalize_1PdS_iS_i .addrsig_sym _Z26__device_stub__normalize_2PdS_i .addrsig_sym _Z23__device_stub__printMatPdii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8assign_QPdS_iii .addrsig_sym _Z8update_qPdS_iii .addrsig_sym _Z8update_zPdS_S_i .addrsig_sym _Z17dot_prod_assign_HPdS_iS_S_iii .addrsig_sym _Z5saxpyPdiS_S_ .addrsig_sym _Z11normalize_1PdS_iS_i .addrsig_sym _Z11normalize_2PdS_i .addrsig_sym _Z8printMatPdii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
code for sm_80 Function : _Z17littleBinoticSortPiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, 0x2 ; /* 0x00000002ff047424 */ /* 0x000fe400078e00ff */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fe400078e0203 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc600078e00ff */ /*0070*/ ISETP.GE.U32.AND P0, PT, R0.reuse, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x040fe20003f06070 */ /*0080*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fd800078e0003 */ /*0090*/ @P0 IMAD.MOV.U32 R5, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff050424 */ /* 0x000fca00078e00ff */ /*00a0*/ @P0 STG.E [R2.64], R5 ; /* 0x0000000502000986 */ /* 0x0001e8000c101904 */ /*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00c0*/ ISETP.LE.U32.AND P0, PT, R4, c[0x0][0x16c], PT ; /* 0x00005b0004007a0c */ /* 0x000fda0003f03070 */ /*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x2 ; /* 0x00000002ff077424 */ /* 0x001fca00078e00ff */ /*00f0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x000fda0003f05270 */ /*0100*/ @!P0 BRA 0x290 ; /* 0x0000018000008947 */ /* 0x001fea0003800000 */ /*0110*/ SHF.R.U32.HI R9, RZ, 0x1, R7 ; /* 0x00000001ff097819 */ /* 0x000fc80000011607 */ /*0120*/ LOP3.LUT R5, R9, R0.reuse, RZ, 0x3c, !PT ; /* 0x0000000009057212 */ /* 0x081fe200078e3cff */ /*0130*/ BSSY B0, 0x250 ; /* 0x0000011000007945 */ /* 0x000fe60003800000 */ /*0140*/ ISETP.GT.U32.AND P0, 
PT, R5, R0, PT ; /* 0x000000000500720c */ /* 0x000fda0003f04070 */ /*0150*/ @!P0 BRA 0x240 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0160*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x000fe200078e00ff */ /*0170*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000166000c1e1900 */ /*0180*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0190*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000162000c1e1900 */ /*01a0*/ LOP3.LUT P0, RZ, R7, R0, RZ, 0xc0, !PT ; /* 0x0000000007ff7212 */ /* 0x000fda000780c0ff */ /*01b0*/ @!P0 BRA 0x210 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*01c0*/ ISETP.GE.AND P0, PT, R11, R6, PT ; /* 0x000000060b00720c */ /* 0x021fda0003f06270 */ /*01d0*/ @P0 BRA 0x240 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*01e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */ /* 0x0001e8000c101904 */ /*01f0*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x0001e2000c101904 */ /*0200*/ BRA 0x240 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*0210*/ ISETP.GT.AND P0, PT, R11, R6, PT ; /* 0x000000060b00720c */ /* 0x021fda0003f04270 */ /*0220*/ @P0 STG.E [R2.64], R6 ; /* 0x0000000602000986 */ /* 0x0001e8000c101904 */ /*0230*/ @P0 STG.E [R4.64], R11 ; /* 0x0000000b04000986 */ /* 0x0001e4000c101904 */ /*0240*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0250*/ SHF.R.U32.HI R9, RZ, 0x1, R9 ; /* 0x00000001ff097819 */ /* 0x000fe20000011609 */ /*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe60000010000 */ /*0270*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fda0003f05270 */ /*0280*/ @P0 BRA 0x120 ; /* 0xfffffe9000000947 */ /* 0x000fea000383ffff */ /*0290*/ IMAD.SHL.U32 R7, R7, 0x2, RZ ; /* 0x0000000207077824 */ /* 0x000fca00078e00ff */ /*02a0*/ ISETP.GT.U32.AND P0, PT, R7, c[0x0][0x16c], PT ; /* 0x00005b0007007a0c */ /* 0x000fda0003f04070 */ /*02b0*/ @!P0 BRA 0xf0 ; /* 0xfffffe3000008947 */ /* 
0x000fea000383ffff */ /*02c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
.file "tmpxft_00160d42_00000000-6_littleBinoticSort.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z4swapRiS_ .type _Z4swapRiS_, @function _Z4swapRiS_: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z4swapRiS_, .-_Z4swapRiS_ .globl _Z39__device_stub__Z17littleBinoticSortPiiiPiii .type _Z39__device_stub__Z17littleBinoticSortPiiiPiii, @function _Z39__device_stub__Z17littleBinoticSortPiiiPiii: .LFB2052: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 104(%rsp), %rax subq %fs:40, %rax jne .L10 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z17littleBinoticSortPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size 
_Z39__device_stub__Z17littleBinoticSortPiiiPiii, .-_Z39__device_stub__Z17littleBinoticSortPiiiPiii .globl _Z17littleBinoticSortPiii .type _Z17littleBinoticSortPiii, @function _Z17littleBinoticSortPiii: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z17littleBinoticSortPiiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z17littleBinoticSortPiii, .-_Z17littleBinoticSortPiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17littleBinoticSortPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17littleBinoticSortPiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
#include <hip/hip_runtime.h> #include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17littleBinoticSortPiii .globl _Z17littleBinoticSortPiii .p2align 8 .type _Z17littleBinoticSortPiii,@function _Z17littleBinoticSortPiii: s_clause 0x2 s_load_b32 s4, s[0:1], 0x1c s_load_b32 s5, s[0:1], 0x8 s_load_b64 s[2:3], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] s_mov_b32 s4, exec_lo v_cmpx_le_u32_e64 s5, v1 s_cbranch_execz .LBB0_2 v_mov_b32_e32 v2, 0 v_bfrev_b32_e32 v0, -2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo global_store_b32 v[2:3], v0, off .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_load_b32 s1, s[0:1], 0xc s_mov_b32 s4, 2 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_cmp_lt_u32 s1, 2 s_cbranch_scc1 .LBB0_15 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo s_branch .LBB0_5 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_lshl_b32 s4, s4, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_gt_u32 s4, s1 s_cbranch_scc1 .LBB0_15 .LBB0_5: s_cmp_eq_u32 s4, 0 s_cbranch_scc1 .LBB0_4 v_and_b32_e32 v0, s4, v1 s_mov_b32 s5, s4 s_delay_alu instid0(VALU_DEP_1) v_cmp_ne_u32_e32 vcc_lo, 0, v0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_8 .p2align 6 .LBB0_7: s_or_b32 exec_lo, exec_lo, s7 s_cmp_lt_u32 s6, 4 s_waitcnt vmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_4 .LBB0_8: s_mov_b32 s6, s5 s_lshr_b32 s5, s5, 1 s_mov_b32 s7, exec_lo v_xor_b32_e32 v4, s5, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u32_e64 v4, v1 s_cbranch_execz .LBB0_7 v_mov_b32_e32 v5, 0 s_mov_b32 s8, 0 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, s0, s2, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 global_load_b32 v0, v[4:5], off s_and_saveexec_b32 s0, vcc_lo s_xor_b32 s9, exec_lo, s0 s_cbranch_execz .LBB0_11 global_load_b32 v6, v[2:3], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e64 s0, v6, v0 s_delay_alu instid0(VALU_DEP_1) s_and_b32 s8, s0, exec_lo .LBB0_11: s_and_not1_saveexec_b32 s9, s9 s_cbranch_execz .LBB0_13 global_load_b32 v6, v[2:3], off s_and_not1_b32 s8, s8, exec_lo s_waitcnt vmcnt(0) v_cmp_gt_i32_e64 s0, v6, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, s0, exec_lo s_or_b32 s8, s8, s0 .LBB0_13: s_or_b32 exec_lo, exec_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 exec_lo, exec_lo, s8 s_cbranch_execz .LBB0_7 s_waitcnt vmcnt(0) global_store_b32 v[2:3], v0, off global_store_b32 v[4:5], v6, off s_branch .LBB0_7 .LBB0_15: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17littleBinoticSortPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 
.amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17littleBinoticSortPiii, .Lfunc_end0-_Z17littleBinoticSortPiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z17littleBinoticSortPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17littleBinoticSortPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
.text .file "littleBinoticSort.hip" .globl _Z32__device_stub__littleBinoticSortPiii # -- Begin function _Z32__device_stub__littleBinoticSortPiii .p2align 4, 0x90 .type _Z32__device_stub__littleBinoticSortPiii,@function _Z32__device_stub__littleBinoticSortPiii: # @_Z32__device_stub__littleBinoticSortPiii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z17littleBinoticSortPiii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z32__device_stub__littleBinoticSortPiii, .Lfunc_end0-_Z32__device_stub__littleBinoticSortPiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17littleBinoticSortPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # 
@__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17littleBinoticSortPiii,@object # @_Z17littleBinoticSortPiii .section .rodata,"a",@progbits .globl _Z17littleBinoticSortPiii .p2align 3, 0x0 _Z17littleBinoticSortPiii: .quad _Z32__device_stub__littleBinoticSortPiii .size _Z17littleBinoticSortPiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17littleBinoticSortPiii" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__littleBinoticSortPiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17littleBinoticSortPiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17littleBinoticSortPiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, 0x2 ; /* 0x00000002ff047424 */ /* 0x000fe400078e00ff */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fe400078e0203 */ /*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc600078e00ff */ /*0070*/ ISETP.GE.U32.AND P0, PT, R0.reuse, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x040fe20003f06070 */ /*0080*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fd800078e0003 */ /*0090*/ @P0 IMAD.MOV.U32 R5, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff050424 */ /* 0x000fca00078e00ff */ /*00a0*/ @P0 STG.E [R2.64], R5 ; /* 0x0000000502000986 */ /* 0x0001e8000c101904 */ /*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*00c0*/ ISETP.LE.U32.AND P0, PT, R4, c[0x0][0x16c], PT ; /* 0x00005b0004007a0c */ /* 0x000fda0003f03070 */ /*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x2 ; /* 0x00000002ff077424 */ /* 0x001fca00078e00ff */ /*00f0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x000fda0003f05270 */ /*0100*/ @!P0 BRA 0x290 ; /* 0x0000018000008947 */ /* 0x001fea0003800000 */ /*0110*/ SHF.R.U32.HI R9, RZ, 0x1, R7 ; /* 0x00000001ff097819 */ /* 0x000fc80000011607 */ /*0120*/ LOP3.LUT R5, R9, R0.reuse, RZ, 0x3c, !PT ; /* 0x0000000009057212 */ /* 0x081fe200078e3cff */ /*0130*/ BSSY B0, 0x250 ; /* 0x0000011000007945 */ /* 0x000fe60003800000 */ /*0140*/ ISETP.GT.U32.AND P0, 
PT, R5, R0, PT ; /* 0x000000000500720c */ /* 0x000fda0003f04070 */ /*0150*/ @!P0 BRA 0x240 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0160*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x000fe200078e00ff */ /*0170*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000166000c1e1900 */ /*0180*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0190*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000162000c1e1900 */ /*01a0*/ LOP3.LUT P0, RZ, R7, R0, RZ, 0xc0, !PT ; /* 0x0000000007ff7212 */ /* 0x000fda000780c0ff */ /*01b0*/ @!P0 BRA 0x210 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*01c0*/ ISETP.GE.AND P0, PT, R11, R6, PT ; /* 0x000000060b00720c */ /* 0x021fda0003f06270 */ /*01d0*/ @P0 BRA 0x240 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*01e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */ /* 0x0001e8000c101904 */ /*01f0*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x0001e2000c101904 */ /*0200*/ BRA 0x240 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*0210*/ ISETP.GT.AND P0, PT, R11, R6, PT ; /* 0x000000060b00720c */ /* 0x021fda0003f04270 */ /*0220*/ @P0 STG.E [R2.64], R6 ; /* 0x0000000602000986 */ /* 0x0001e8000c101904 */ /*0230*/ @P0 STG.E [R4.64], R11 ; /* 0x0000000b04000986 */ /* 0x0001e4000c101904 */ /*0240*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0250*/ SHF.R.U32.HI R9, RZ, 0x1, R9 ; /* 0x00000001ff097819 */ /* 0x000fe20000011609 */ /*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe60000010000 */ /*0270*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fda0003f05270 */ /*0280*/ @P0 BRA 0x120 ; /* 0xfffffe9000000947 */ /* 0x000fea000383ffff */ /*0290*/ IMAD.SHL.U32 R7, R7, 0x2, RZ ; /* 0x0000000207077824 */ /* 0x000fca00078e00ff */ /*02a0*/ ISETP.GT.U32.AND P0, PT, R7, c[0x0][0x16c], PT ; /* 0x00005b0007007a0c */ /* 0x000fda0003f04070 */ /*02b0*/ @!P0 BRA 0xf0 ; /* 0xfffffe3000008947 */ /* 
0x000fea000383ffff */ /*02c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17littleBinoticSortPiii .globl _Z17littleBinoticSortPiii .p2align 8 .type _Z17littleBinoticSortPiii,@function _Z17littleBinoticSortPiii: s_clause 0x2 s_load_b32 s4, s[0:1], 0x1c s_load_b32 s5, s[0:1], 0x8 s_load_b64 s[2:3], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] s_mov_b32 s4, exec_lo v_cmpx_le_u32_e64 s5, v1 s_cbranch_execz .LBB0_2 v_mov_b32_e32 v2, 0 v_bfrev_b32_e32 v0, -2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo global_store_b32 v[2:3], v0, off .LBB0_2: s_or_b32 exec_lo, exec_lo, s4 s_load_b32 s1, s[0:1], 0xc s_mov_b32 s4, 2 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_cmp_lt_u32 s1, 2 s_cbranch_scc1 .LBB0_15 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s2, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo s_branch .LBB0_5 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_lshl_b32 s4, s4, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_gt_u32 s4, s1 s_cbranch_scc1 .LBB0_15 .LBB0_5: s_cmp_eq_u32 s4, 0 s_cbranch_scc1 .LBB0_4 v_and_b32_e32 v0, s4, v1 s_mov_b32 s5, s4 s_delay_alu instid0(VALU_DEP_1) v_cmp_ne_u32_e32 vcc_lo, 0, v0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_8 .p2align 6 .LBB0_7: s_or_b32 exec_lo, exec_lo, s7 s_cmp_lt_u32 s6, 4 s_waitcnt vmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_4 .LBB0_8: s_mov_b32 s6, s5 s_lshr_b32 s5, s5, 1 s_mov_b32 s7, exec_lo v_xor_b32_e32 v4, s5, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_u32_e64 v4, v1 s_cbranch_execz .LBB0_7 v_mov_b32_e32 v5, 0 s_mov_b32 s8, 0 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] v_add_co_u32 v4, s0, s2, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e64 v5, s0, s3, v5, s0 global_load_b32 v0, v[4:5], off s_and_saveexec_b32 s0, vcc_lo s_xor_b32 s9, exec_lo, s0 s_cbranch_execz .LBB0_11 global_load_b32 v6, v[2:3], off s_waitcnt vmcnt(0) v_cmp_lt_i32_e64 s0, v6, v0 s_delay_alu instid0(VALU_DEP_1) s_and_b32 s8, s0, exec_lo .LBB0_11: s_and_not1_saveexec_b32 s9, s9 s_cbranch_execz .LBB0_13 global_load_b32 v6, v[2:3], off s_and_not1_b32 s8, s8, exec_lo s_waitcnt vmcnt(0) v_cmp_gt_i32_e64 s0, v6, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, s0, exec_lo s_or_b32 s8, s8, s0 .LBB0_13: s_or_b32 exec_lo, exec_lo, s9 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 exec_lo, exec_lo, s8 s_cbranch_execz .LBB0_7 s_waitcnt vmcnt(0) global_store_b32 v[2:3], v0, off global_store_b32 v[4:5], v6, off s_branch .LBB0_7 .LBB0_15: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17littleBinoticSortPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 
.amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17littleBinoticSortPiii, .Lfunc_end0-_Z17littleBinoticSortPiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z17littleBinoticSortPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17littleBinoticSortPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00160d42_00000000-6_littleBinoticSort.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z4swapRiS_ .type _Z4swapRiS_, @function _Z4swapRiS_: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z4swapRiS_, .-_Z4swapRiS_ .globl _Z39__device_stub__Z17littleBinoticSortPiiiPiii .type _Z39__device_stub__Z17littleBinoticSortPiiiPiii, @function _Z39__device_stub__Z17littleBinoticSortPiiiPiii: .LFB2052: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 104(%rsp), %rax subq %fs:40, %rax jne .L10 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z17littleBinoticSortPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size 
_Z39__device_stub__Z17littleBinoticSortPiiiPiii, .-_Z39__device_stub__Z17littleBinoticSortPiiiPiii .globl _Z17littleBinoticSortPiii .type _Z17littleBinoticSortPiii, @function _Z17littleBinoticSortPiii: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z17littleBinoticSortPiiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z17littleBinoticSortPiii, .-_Z17littleBinoticSortPiii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17littleBinoticSortPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17littleBinoticSortPiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "littleBinoticSort.hip" .globl _Z32__device_stub__littleBinoticSortPiii # -- Begin function _Z32__device_stub__littleBinoticSortPiii .p2align 4, 0x90 .type _Z32__device_stub__littleBinoticSortPiii,@function _Z32__device_stub__littleBinoticSortPiii: # @_Z32__device_stub__littleBinoticSortPiii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z17littleBinoticSortPiii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z32__device_stub__littleBinoticSortPiii, .Lfunc_end0-_Z32__device_stub__littleBinoticSortPiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17littleBinoticSortPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # 
@__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17littleBinoticSortPiii,@object # @_Z17littleBinoticSortPiii .section .rodata,"a",@progbits .globl _Z17littleBinoticSortPiii .p2align 3, 0x0 _Z17littleBinoticSortPiii: .quad _Z32__device_stub__littleBinoticSortPiii .size _Z17littleBinoticSortPiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17littleBinoticSortPiii" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__littleBinoticSortPiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17littleBinoticSortPiii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( cudaError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != cudaSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, cudaGetErrorString( error_index ) ); cudaDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( cudaGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); cudaDeviceProp gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( cudaGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( cudaError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != cudaSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, cudaGetErrorString( error_index ) ); cudaDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( cudaGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); cudaDeviceProp gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( cudaGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
.file "tmpxft_00052055_00000000-6_get_gpu_info.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "enabled" .LC1: .string "disabled" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "/home/ubuntu/Datasets/stackv2/train-structured/QiqiDuan/OPAL-Lab/master/CUDA/docs/get_gpu_info.cu" .align 8 .LC3: .string "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n" .align 8 .LC4: .string "\n***\n* Total number of GPU devices currently available: %d.\n***\n" .align 8 .LC5: .string "\n*************************************************\n" .align 8 .LC6: .string "*** General Info. 
for GPU Device Index: %d.\n" .section .rodata.str1.1 .LC7: .string "* Device name: %s.\n" .LC8: .string "* Compute mode: %d.\n" .LC9: .string "* Compute capability: %d.%d.\n" .section .rodata.str1.8 .align 8 .LC10: .string "* Kernel execution timeout: %s.\n" .section .rodata.str1.1 .LC11: .string "*** Memory Info.:\n" .section .rodata.str1.8 .align 8 .LC13: .string "* Total global memory: %zu (~= %5.2lf GB).\n" .align 8 .LC15: .string "* Shared memory per block: %zu (~= %5.2lf KB).\n" .section .rodata.str1.1 .LC16: .string "*** MP Info.:\n" .LC17: .string "* Multi-processor num: %d.\n" .section .rodata.str1.8 .align 8 .LC18: .string "* Max threads per blocks: %d.\n" .align 8 .LC19: .string "* Max thread dim: (%d, %d, %d).\n" .align 8 .LC20: .string "* Max grid dim: (%d, %d, %d).\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $1056, %rsp .cfi_def_cfa_offset 1088 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax leaq 12(%rsp), %rdi call cudaGetDeviceCount@PLT testl %eax, %eax jne .L17 movl 12(%rsp), %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, 12(%rsp) jle .L5 movl $0, %ebx leaq .LC5(%rip), %r12 leaq .LC6(%rip), %rbp jmp .L12 .L17: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r9 movl $27, %r8d leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT call cudaDeviceReset@PLT movl $1, %edi call exit@PLT .L18: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r9 movl $32, %r8d leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT call cudaDeviceReset@PLT movl $1, %edi call exit@PLT .L8: movq %rdx, %rax shrq %rax movq %rdx, %rcx andl $1, %ecx orq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq 
%rax, %xmm0 addsd %xmm0, %xmm0 jmp .L9 .L10: movq %rdx, %rax shrq %rax movq %rdx, %rcx andl $1, %ecx orq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 addsd %xmm0, %xmm0 .L11: mulsd .LC14(%rip), %xmm0 leaq .LC15(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq .LC16(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 404(%rsp), %edx leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 336(%rsp), %edx leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 348(%rsp), %r8d movl 344(%rsp), %ecx movl 340(%rsp), %edx leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 360(%rsp), %r8d movl 356(%rsp), %ecx movl 352(%rsp), %edx leaq .LC20(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebx, 12(%rsp) jle .L5 .L12: leaq 16(%rsp), %rdi movl %ebx, %esi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L18 movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 16(%rsp), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 420(%rsp), %edx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 380(%rsp), %ecx movl 376(%rsp), %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, 408(%rsp) leaq .LC1(%rip), %rdx leaq .LC0(%rip), %rax cmovne %rax, %rdx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 304(%rsp), %rdx testq %rdx, %rdx js .L8 pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 .L9: mulsd .LC12(%rip), %xmm0 leaq .LC13(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 312(%rsp), %rdx testq %rdx, %rdx js .L10 pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 jmp .L11 .L5: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L19 movl $0, %eax addq $1056, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC12: .long 0 .long 1041235968 .align 8 .LC14: .long 0 .long 1062207488 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( cudaError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != cudaSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, cudaGetErrorString( error_index ) ); cudaDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( cudaGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); cudaDeviceProp gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( cudaGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( hipError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != hipSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, hipGetErrorString( error_index ) ); hipDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( hipGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); hipDeviceProp_t gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( hipGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( hipError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != hipSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, hipGetErrorString( error_index ) ); hipDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( hipGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); hipDeviceProp_t gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( hipGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> /** * HOST: Handle the CUDA Errors. */ #define HANDLE_CUDA_ERROR( cuda_expression ) { assertGpuError( ( cuda_expression ), __FILE__, __LINE__ ); } inline void assertGpuError( hipError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != hipSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, hipGetErrorString( error_index ) ); hipDeviceReset(); exit( EXIT_FAILURE ); } } int main ( void ) { int num_gpu; HANDLE_CUDA_ERROR( hipGetDeviceCount( &num_gpu ) ); printf( "\n***\n* Total number of GPU devices currently available: %d.\n***\n", num_gpu ); hipDeviceProp_t gpu_info; for ( int ind_gpu = 0; ind_gpu < num_gpu; ind_gpu++ ) { HANDLE_CUDA_ERROR( hipGetDeviceProperties( &gpu_info, ind_gpu ) ); printf( "\n*************************************************\n" ); printf( "*** General Info. for GPU Device Index: %d.\n", ind_gpu ); printf( "* Device name: %s.\n", gpu_info.name ); printf( "* Compute mode: %d.\n", gpu_info.computeMode ); printf( "* Compute capability: %d.%d.\n", gpu_info.major, gpu_info.minor ); printf( "* Kernel execution timeout: %s.\n", gpu_info.kernelExecTimeoutEnabled ? 
"enabled" : "disabled" ); printf( "*** Memory Info.:\n" ); printf( "* Total global memory: %zu (~= %5.2lf GB).\n", gpu_info.totalGlobalMem, ( double ) gpu_info.totalGlobalMem / pow( 2.0, 30 ) ); printf( "* Shared memory per block: %zu (~= %5.2lf KB).\n", gpu_info.sharedMemPerBlock, ( double ) gpu_info.sharedMemPerBlock / pow( 2.0, 10 ) ); printf( "*** MP Info.:\n" ); printf( "* Multi-processor num: %d.\n", gpu_info.multiProcessorCount ); printf( "* Max threads per blocks: %d.\n", gpu_info.maxThreadsPerBlock ); printf( "* Max thread dim: (%d, %d, %d).\n", gpu_info.maxThreadsDim[ 0 ], gpu_info.maxThreadsDim[ 1 ], gpu_info.maxThreadsDim[ 2 ] ); printf( "* Max grid dim: (%d, %d, %d).\n", gpu_info.maxGridSize[ 0 ], gpu_info.maxGridSize[ 1 ], gpu_info.maxGridSize[ 2 ] ); } }
.text .file "get_gpu_info.hip" .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI0_0: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI0_1: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI0_2: .quad 0x3e10000000000000 # double 9.3132257461547852E-10 .LCPI0_3: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $1488, %rsp # imm = 0x5D0 .cfi_def_cfa_offset 1520 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 leaq 12(%rsp), %rdi callq hipGetDeviceCount testl %eax, %eax jne .LBB0_1 # %bb.3: # %_Z14assertGpuError10hipError_tPKcj.exit movl 12(%rsp), %esi movl $.L.str.1, %edi xorl %eax, %eax callq printf cmpl $0, 12(%rsp) jle .LBB0_8 # %bb.4: # %.lr.ph leaq 16(%rsp), %rbx movl $.L.str.9, %r14d xorl %ebp, %ebp .p2align 4, 0x90 .LBB0_5: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl %ebp, %esi callq hipGetDevicePropertiesR0600 testl %eax, %eax jne .LBB0_6 # %bb.7: # %_Z14assertGpuError10hipError_tPKcj.exit8 # in Loop: Header=BB0_5 Depth=1 movl $.Lstr, %edi callq puts@PLT movl $.L.str.3, %edi movl %ebp, %esi xorl %eax, %eax callq printf movl $.L.str.4, %edi movq %rbx, %rsi xorl %eax, %eax callq printf movl 420(%rsp), %esi movl $.L.str.5, %edi xorl %eax, %eax callq printf movl 376(%rsp), %esi movl 380(%rsp), %edx movl $.L.str.6, %edi xorl %eax, %eax callq printf cmpl $0, 408(%rsp) movl $.L.str.8, %esi cmoveq %r14, %rsi movl $.L.str.7, %edi xorl %eax, %eax callq printf movl $.Lstr.1, %edi callq puts@PLT movq 304(%rsp), %rsi movq %rsi, %xmm1 movdqa .LCPI0_0(%rip), %xmm0 # xmm0 = [1127219200,1160773632,0,0] punpckldq %xmm0, %xmm1 # xmm1 = 
xmm1[0],xmm0[0],xmm1[1],xmm0[1] movapd .LCPI0_1(%rip), %xmm0 # xmm0 = [4.503599627370496E+15,1.9342813113834067E+25] subpd %xmm0, %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 mulsd .LCPI0_2(%rip), %xmm0 movl $.L.str.11, %edi movb $1, %al callq printf movq 312(%rsp), %rsi movq %rsi, %xmm1 punpckldq .LCPI0_0(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] subpd .LCPI0_1(%rip), %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 mulsd .LCPI0_3(%rip), %xmm0 movl $.L.str.12, %edi movb $1, %al callq printf movl $.Lstr.2, %edi callq puts@PLT movl 404(%rsp), %esi movl $.L.str.14, %edi xorl %eax, %eax callq printf movl 336(%rsp), %esi movl $.L.str.15, %edi xorl %eax, %eax callq printf movl 340(%rsp), %esi movl 344(%rsp), %edx movl 348(%rsp), %ecx movl $.L.str.16, %edi xorl %eax, %eax callq printf movl 352(%rsp), %esi movl 356(%rsp), %edx movl 360(%rsp), %ecx movl $.L.str.17, %edi xorl %eax, %eax callq printf incl %ebp cmpl 12(%rsp), %ebp jl .LBB0_5 .LBB0_8: # %._crit_edge xorl %eax, %eax addq $1488, %rsp # imm = 0x5D0 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB0_6: .cfi_def_cfa_offset 1520 movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str, %edx movq %rbx, %rdi movl $32, %ecx .LBB0_2: movq %rax, %r8 xorl %eax, %eax callq fprintf callq hipDeviceReset movl $1, %edi callq exit .LBB0_1: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str, %edx movq %rbx, %rdi movl $27, %ecx jmp .LBB0_2 .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/QiqiDuan/OPAL-Lab/master/CUDA/docs/get_gpu_info.hip" .size .L.str, 109 .type .L.str.1,@object # @.str.1 .L.str.1: 
.asciz "\n***\n* Total number of GPU devices currently available: %d.\n***\n" .size .L.str.1, 65 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "*** General Info. for GPU Device Index: %d.\n" .size .L.str.3, 45 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "* Device name: %s.\n" .size .L.str.4, 20 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "* Compute mode: %d.\n" .size .L.str.5, 21 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "* Compute capability: %d.%d.\n" .size .L.str.6, 30 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "* Kernel execution timeout: %s.\n" .size .L.str.7, 33 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "enabled" .size .L.str.8, 8 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "disabled" .size .L.str.9, 9 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "* Total global memory: %zu (~= %5.2lf GB).\n" .size .L.str.11, 44 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "* Shared memory per block: %zu (~= %5.2lf KB).\n" .size .L.str.12, 48 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "* Multi-processor num: %d.\n" .size .L.str.14, 28 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "* Max threads per blocks: %d.\n" .size .L.str.15, 31 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "* Max thread dim: (%d, %d, %d).\n" .size .L.str.16, 33 .type .L.str.17,@object # @.str.17 .L.str.17: .asciz "* Max grid dim: (%d, %d, %d).\n" .size .L.str.17, 31 .type .L.str.18,@object # @.str.18 .L.str.18: .asciz "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n" .size .L.str.18, 50 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\n*************************************************" .size .Lstr, 51 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "*** Memory Info.:" .size .Lstr.1, 18 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "*** MP Info.:" .size .Lstr.2, 14 
.section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00052055_00000000-6_get_gpu_info.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "enabled" .LC1: .string "disabled" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "/home/ubuntu/Datasets/stackv2/train-structured/QiqiDuan/OPAL-Lab/master/CUDA/docs/get_gpu_info.cu" .align 8 .LC3: .string "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n" .align 8 .LC4: .string "\n***\n* Total number of GPU devices currently available: %d.\n***\n" .align 8 .LC5: .string "\n*************************************************\n" .align 8 .LC6: .string "*** General Info. 
for GPU Device Index: %d.\n" .section .rodata.str1.1 .LC7: .string "* Device name: %s.\n" .LC8: .string "* Compute mode: %d.\n" .LC9: .string "* Compute capability: %d.%d.\n" .section .rodata.str1.8 .align 8 .LC10: .string "* Kernel execution timeout: %s.\n" .section .rodata.str1.1 .LC11: .string "*** Memory Info.:\n" .section .rodata.str1.8 .align 8 .LC13: .string "* Total global memory: %zu (~= %5.2lf GB).\n" .align 8 .LC15: .string "* Shared memory per block: %zu (~= %5.2lf KB).\n" .section .rodata.str1.1 .LC16: .string "*** MP Info.:\n" .LC17: .string "* Multi-processor num: %d.\n" .section .rodata.str1.8 .align 8 .LC18: .string "* Max threads per blocks: %d.\n" .align 8 .LC19: .string "* Max thread dim: (%d, %d, %d).\n" .align 8 .LC20: .string "* Max grid dim: (%d, %d, %d).\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $1056, %rsp .cfi_def_cfa_offset 1088 movq %fs:40, %rax movq %rax, 1048(%rsp) xorl %eax, %eax leaq 12(%rsp), %rdi call cudaGetDeviceCount@PLT testl %eax, %eax jne .L17 movl 12(%rsp), %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, 12(%rsp) jle .L5 movl $0, %ebx leaq .LC5(%rip), %r12 leaq .LC6(%rip), %rbp jmp .L12 .L17: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r9 movl $27, %r8d leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT call cudaDeviceReset@PLT movl $1, %edi call exit@PLT .L18: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r9 movl $32, %r8d leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT call cudaDeviceReset@PLT movl $1, %edi call exit@PLT .L8: movq %rdx, %rax shrq %rax movq %rdx, %rcx andl $1, %ecx orq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq 
%rax, %xmm0 addsd %xmm0, %xmm0 jmp .L9 .L10: movq %rdx, %rax shrq %rax movq %rdx, %rcx andl $1, %ecx orq %rcx, %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 addsd %xmm0, %xmm0 .L11: mulsd .LC14(%rip), %xmm0 leaq .LC15(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq .LC16(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 404(%rsp), %edx leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 336(%rsp), %edx leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 348(%rsp), %r8d movl 344(%rsp), %ecx movl 340(%rsp), %edx leaq .LC19(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 360(%rsp), %r8d movl 356(%rsp), %ecx movl 352(%rsp), %edx leaq .LC20(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl %ebx, 12(%rsp) jle .L5 .L12: leaq 16(%rsp), %rdi movl %ebx, %esi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L18 movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebx, %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 16(%rsp), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 420(%rsp), %edx leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl 380(%rsp), %ecx movl 376(%rsp), %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, 408(%rsp) leaq .LC1(%rip), %rdx leaq .LC0(%rip), %rax cmovne %rax, %rdx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 304(%rsp), %rdx testq %rdx, %rdx js .L8 pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 .L9: mulsd .LC12(%rip), %xmm0 leaq .LC13(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 312(%rsp), %rdx testq %rdx, %rdx js .L10 pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 jmp .L11 .L5: movq 1048(%rsp), %rax subq %fs:40, %rax jne .L19 movl $0, %eax addq $1056, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L19: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC12: .long 0 .long 1041235968 .align 8 .LC14: .long 0 .long 1062207488 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "get_gpu_info.hip" .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI0_0: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI0_1: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI0_2: .quad 0x3e10000000000000 # double 9.3132257461547852E-10 .LCPI0_3: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $1488, %rsp # imm = 0x5D0 .cfi_def_cfa_offset 1520 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 leaq 12(%rsp), %rdi callq hipGetDeviceCount testl %eax, %eax jne .LBB0_1 # %bb.3: # %_Z14assertGpuError10hipError_tPKcj.exit movl 12(%rsp), %esi movl $.L.str.1, %edi xorl %eax, %eax callq printf cmpl $0, 12(%rsp) jle .LBB0_8 # %bb.4: # %.lr.ph leaq 16(%rsp), %rbx movl $.L.str.9, %r14d xorl %ebp, %ebp .p2align 4, 0x90 .LBB0_5: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl %ebp, %esi callq hipGetDevicePropertiesR0600 testl %eax, %eax jne .LBB0_6 # %bb.7: # %_Z14assertGpuError10hipError_tPKcj.exit8 # in Loop: Header=BB0_5 Depth=1 movl $.Lstr, %edi callq puts@PLT movl $.L.str.3, %edi movl %ebp, %esi xorl %eax, %eax callq printf movl $.L.str.4, %edi movq %rbx, %rsi xorl %eax, %eax callq printf movl 420(%rsp), %esi movl $.L.str.5, %edi xorl %eax, %eax callq printf movl 376(%rsp), %esi movl 380(%rsp), %edx movl $.L.str.6, %edi xorl %eax, %eax callq printf cmpl $0, 408(%rsp) movl $.L.str.8, %esi cmoveq %r14, %rsi movl $.L.str.7, %edi xorl %eax, %eax callq printf movl $.Lstr.1, %edi callq puts@PLT movq 304(%rsp), %rsi movq %rsi, %xmm1 movdqa .LCPI0_0(%rip), %xmm0 # xmm0 = [1127219200,1160773632,0,0] punpckldq %xmm0, %xmm1 # xmm1 = 
xmm1[0],xmm0[0],xmm1[1],xmm0[1] movapd .LCPI0_1(%rip), %xmm0 # xmm0 = [4.503599627370496E+15,1.9342813113834067E+25] subpd %xmm0, %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 mulsd .LCPI0_2(%rip), %xmm0 movl $.L.str.11, %edi movb $1, %al callq printf movq 312(%rsp), %rsi movq %rsi, %xmm1 punpckldq .LCPI0_0(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] subpd .LCPI0_1(%rip), %xmm1 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 mulsd .LCPI0_3(%rip), %xmm0 movl $.L.str.12, %edi movb $1, %al callq printf movl $.Lstr.2, %edi callq puts@PLT movl 404(%rsp), %esi movl $.L.str.14, %edi xorl %eax, %eax callq printf movl 336(%rsp), %esi movl $.L.str.15, %edi xorl %eax, %eax callq printf movl 340(%rsp), %esi movl 344(%rsp), %edx movl 348(%rsp), %ecx movl $.L.str.16, %edi xorl %eax, %eax callq printf movl 352(%rsp), %esi movl 356(%rsp), %edx movl 360(%rsp), %ecx movl $.L.str.17, %edi xorl %eax, %eax callq printf incl %ebp cmpl 12(%rsp), %ebp jl .LBB0_5 .LBB0_8: # %._crit_edge xorl %eax, %eax addq $1488, %rsp # imm = 0x5D0 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB0_6: .cfi_def_cfa_offset 1520 movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str, %edx movq %rbx, %rdi movl $32, %ecx .LBB0_2: movq %rax, %r8 xorl %eax, %eax callq fprintf callq hipDeviceReset movl $1, %edi callq exit .LBB0_1: movq stderr(%rip), %rbx movl %eax, %edi callq hipGetErrorString movl $.L.str.18, %esi movl $.L.str, %edx movq %rbx, %rdi movl $27, %ecx jmp .LBB0_2 .Lfunc_end0: .size main, .Lfunc_end0-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/QiqiDuan/OPAL-Lab/master/CUDA/docs/get_gpu_info.hip" .size .L.str, 109 .type .L.str.1,@object # @.str.1 .L.str.1: 
.asciz "\n***\n* Total number of GPU devices currently available: %d.\n***\n" .size .L.str.1, 65 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "*** General Info. for GPU Device Index: %d.\n" .size .L.str.3, 45 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "* Device name: %s.\n" .size .L.str.4, 20 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "* Compute mode: %d.\n" .size .L.str.5, 21 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "* Compute capability: %d.%d.\n" .size .L.str.6, 30 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "* Kernel execution timeout: %s.\n" .size .L.str.7, 33 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "enabled" .size .L.str.8, 8 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "disabled" .size .L.str.9, 9 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "* Total global memory: %zu (~= %5.2lf GB).\n" .size .L.str.11, 44 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "* Shared memory per block: %zu (~= %5.2lf KB).\n" .size .L.str.12, 48 .type .L.str.14,@object # @.str.14 .L.str.14: .asciz "* Multi-processor num: %d.\n" .size .L.str.14, 28 .type .L.str.15,@object # @.str.15 .L.str.15: .asciz "* Max threads per blocks: %d.\n" .size .L.str.15, 31 .type .L.str.16,@object # @.str.16 .L.str.16: .asciz "* Max thread dim: (%d, %d, %d).\n" .size .L.str.16, 33 .type .L.str.17,@object # @.str.17 .L.str.17: .asciz "* Max grid dim: (%d, %d, %d).\n" .size .L.str.17, 31 .type .L.str.18,@object # @.str.18 .L.str.18: .asciz "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n" .size .L.str.18, 50 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\n*************************************************" .size .Lstr, 51 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "*** Memory Info.:" .size .Lstr.1, 18 .type .Lstr.2,@object # @str.2 .Lstr.2: .asciz "*** MP Info.:" .size .Lstr.2, 14 
.section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <assert.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <assert.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
.file "tmpxft_0006f9ca_00000000-6_fw_gpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z21generate_random_graphPii .type _Z21generate_random_graphPii, @function _Z21generate_random_graphPii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r14 movl %esi, %r13d movl $-623191334, %edi call srand@PLT testl %r13d, %r13d jle .L3 movl $0, %r12d movl $0, %ebp movl $536870911, %r15d jmp .L5 .L14: leal (%rbx,%r12), %eax cltq movl $0, (%r14,%rax,4) .L7: leal 1(%rbx), %eax cmpl %eax, %r13d je .L13 movl %eax, %ebx .L9: cmpl %ebx, %ebp je .L14 call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $36, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %ecx sall $3, %ecx subl %ecx, %eax movl %eax, %edx cmpl $21, %eax cmovge %r15d, %edx leal (%rbx,%r12), %eax cltq movl %edx, (%r14,%rax,4) jmp .L7 .L13: leal 1(%rbp), %eax addl %r13d, %r12d cmpl %ebx, %ebp je .L3 movl %eax, %ebp .L5: movl $0, %ebx jmp .L9 .L3: addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z21generate_random_graphPii, .-_Z21generate_random_graphPii .globl 
_Z18floyd_warshall_gpuPKiiPi .type _Z18floyd_warshall_gpuPKiiPi, @function _Z18floyd_warshall_gpuPKiiPi: .LFB2058: .cfi_startproc endbr64 ret .cfi_endproc .LFE2058: .size _Z18floyd_warshall_gpuPKiiPi, .-_Z18floyd_warshall_gpuPKiiPi .globl _Z18floyd_warshall_cpuPKiiPi .type _Z18floyd_warshall_cpuPKiiPi, @function _Z18floyd_warshall_cpuPKiiPi: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movl %esi, %ebp movq %rdx, %r14 movslq %esi, %rbx movq %rbx, %rdx imulq %rbx, %rdx salq $2, %rdx movq %rdi, %rsi movq %r14, %rdi call memcpy@PLT testl %ebp, %ebp jle .L16 movq %r14, %r12 leaq 0(,%rbx,4), %r9 addq %r9, %r14 movq %rbx, %r10 negq %r10 salq $2, %r10 movl $0, %r11d movl $0, %r13d movl $0, %r15d jmp .L18 .L19: addq $4, %rax cmpq %rcx, %rax je .L25 .L20: movl (%rax,%rdi,4), %edx addl (%rsi), %edx cmpl (%rax), %edx jge .L19 movl %edx, (%rax) jmp .L19 .L25: leal 1(%r8), %eax addq %r9, %rcx addq %r9, %rsi subq %rbx, %rdi cmpl %eax, %ebp je .L21 movl %eax, %r8d .L22: leaq (%rcx,%r10), %rax jmp .L20 .L21: leal 1(%r13), %eax addq $4, %r12 addq %rbx, %r11 cmpl %r13d, %r8d je .L16 movl %eax, %r13d .L18: movq %r11, %rdi movq %r12, %rsi movq %r14, %rcx movl %r15d, %r8d jmp .L22 .L16: addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z18floyd_warshall_cpuPKiiPi, .-_Z18floyd_warshall_cpuPKiiPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "running on cpu...\n" .LC2: .string "%f secs\n" .LC3: .string "running on gpu...\n" 
.LC4: .string "FAIL!\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $16000000, %edi call malloc@PLT movq %rax, %r12 movl $16000000, %edi call malloc@PLT movq %rax, %rbx movl $16000000, %edx movl $0, %esi movq %rax, %rdi call memset@PLT movl $16000000, %edi call malloc@PLT movq %rax, %rbp movl $2000, %esi movq %r12, %rdi call _Z21generate_random_graphPii leaq .LC0(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT movq %rbx, %rdx movl $2000, %esi movq %r12, %rdi call _Z18floyd_warshall_cpuPKiiPi leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 16(%rsp), %rdx subq (%rsp), %rdx movq 24(%rsp), %rax subq 8(%rsp), %rax js .L32 .L27: pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssq %rdx, %xmm1 cvtss2sd %xmm1, %xmm1 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 16(%rsp), %rdx subq (%rsp), %rdx movq 24(%rsp), %rax subq 8(%rsp), %rax js .L33 .L28: pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssq %rdx, %xmm1 cvtss2sd %xmm1, %xmm1 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT movl $16000000, %edx movq %rbp, %rsi movq %rbx, %rdi call memcmp@PLT testl %eax, %eax jne .L34 .L29: 
movq 40(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L32: .cfi_restore_state subq $1, %rdx addq $1000000, %rax jmp .L27 .L33: subq $1, %rdx addq $1000000, %rax jmp .L28 .L34: leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L29 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <assert.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
#include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #define GRAPH_SIZE 2000 #define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b] #define D(a, b) EDGE_COST(output, graph_size, a, b) #define INF 0x1fffffff void generate_random_graph(int *output, int graph_size) { int i, j; srand(0xdadadada); for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (i == j) { D(i, j) = 0; } else { int r; r = rand() % 40; if (r > 20) { r = INF; } D(i, j) = r; } } } } void floyd_warshall_gpu(const int *graph, int graph_size, int *output) { // TODO } void floyd_warshall_cpu(const int *graph, int graph_size, int *output) { int i, j, k; memcpy(output, graph, sizeof(int) * graph_size * graph_size); for (k = 0; k < graph_size; k++) { for (i = 0; i < graph_size; i++) { for (j = 0; j < graph_size; j++) { if (D(i, k) + D(k, j) < D(i, j)) { D(i, j) = D(i, k) + D(k, j); } } } } } int main(int argc, char **argv) { #define TIMER_START() gettimeofday(&tv1, NULL) #define TIMER_STOP() \ gettimeofday(&tv2, NULL); \ timersub(&tv2, &tv1, &tv); \ time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0 struct timeval tv1, tv2, tv; float time_delta; int *graph, *output_cpu, *output_gpu; int size; size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE; graph = (int *)malloc(size); assert(graph); output_cpu = (int *)malloc(size); assert(output_cpu); memset(output_cpu, 0, size); output_gpu = (int *)malloc(size); assert(output_gpu); generate_random_graph(graph, GRAPH_SIZE); fprintf(stderr, "running on cpu...\n"); TIMER_START(); floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); fprintf(stderr, "running on gpu...\n"); TIMER_START(); floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu); TIMER_STOP(); fprintf(stderr, "%f secs\n", time_delta); if (memcmp(output_cpu, output_gpu, size) != 0) { fprintf(stderr, "FAIL!\n"); } return 0; }
.text .file "fw_gpu.hip" .globl _Z21generate_random_graphPii # -- Begin function _Z21generate_random_graphPii .p2align 4, 0x90 .type _Z21generate_random_graphPii,@function _Z21generate_random_graphPii: # @_Z21generate_random_graphPii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movq %rdi, 16(%rsp) # 8-byte Spill movl $-623191334, %edi # imm = 0xDADADADA callq srand movl %ebx, 12(%rsp) # 4-byte Spill testl %ebx, %ebx jle .LBB0_7 # %bb.1: # %.preheader.lr.ph movl 12(%rsp), %r15d # 4-byte Reload xorl %r12d, %r12d movl $536870911, %ebp # imm = 0x1FFFFFFF xorl %r13d, %r13d jmp .LBB0_2 .p2align 4, 0x90 .LBB0_6: # %._crit_edge # in Loop: Header=BB0_2 Depth=1 incq %r13 addl 12(%rsp), %r12d # 4-byte Folded Reload cmpq %r15, %r13 je .LBB0_7 .LBB0_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 movl %r12d, %eax movq 16(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %ebx, %ebx jmp .LBB0_3 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_3 Depth=2 movl %eax, (%r14,%rbx,4) incq %rbx cmpq %rbx, %r15 je .LBB0_6 .LBB0_3: # Parent Loop BB0_2 Depth=1 # => This Inner Loop Header: Depth=2 xorl %eax, %eax cmpq %rbx, %r13 je .LBB0_5 # %bb.4: # in Loop: Header=BB0_3 Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx shll $3, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax cmpl $21, %eax cmovgel %ebp, %eax jmp .LBB0_5 .LBB0_7: # %._crit_edge21 addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 
popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z21generate_random_graphPii, .Lfunc_end0-_Z21generate_random_graphPii .cfi_endproc # -- End function .globl _Z18floyd_warshall_gpuPKiiPi # -- Begin function _Z18floyd_warshall_gpuPKiiPi .p2align 4, 0x90 .type _Z18floyd_warshall_gpuPKiiPi,@function _Z18floyd_warshall_gpuPKiiPi: # @_Z18floyd_warshall_gpuPKiiPi .cfi_startproc # %bb.0: retq .Lfunc_end1: .size _Z18floyd_warshall_gpuPKiiPi, .Lfunc_end1-_Z18floyd_warshall_gpuPKiiPi .cfi_endproc # -- End function .globl _Z18floyd_warshall_cpuPKiiPi # -- Begin function _Z18floyd_warshall_cpuPKiiPi .p2align 4, 0x90 .type _Z18floyd_warshall_cpuPKiiPi,@function _Z18floyd_warshall_cpuPKiiPi: # @_Z18floyd_warshall_cpuPKiiPi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, %rbx movl %esi, %ebp movq %rdi, %rsi movslq %ebp, %r14 movq %r14, %rdx imulq %r14, %rdx shlq $2, %rdx movq %rbx, %rdi callq memcpy@PLT testl %r14d, %r14d jle .LBB2_9 # %bb.1: # %.preheader40.lr.ph movl %ebp, %eax xorl %ecx, %ecx xorl %edx, %edx jmp .LBB2_2 .p2align 4, 0x90 .LBB2_8: # %._crit_edge43 # in Loop: Header=BB2_2 Depth=1 incq %rdx addl %ebp, %ecx cmpq %rax, %rdx je .LBB2_9 .LBB2_2: # %.preheader40 # =>This Loop Header: Depth=1 # Child Loop BB2_3 Depth 2 # Child Loop BB2_4 Depth 3 movl %ecx, %esi leaq (%rbx,%rsi,4), %rsi leaq (%rbx,%rdx,4), %rdi xorl %r8d, %r8d xorl %r9d, %r9d jmp .LBB2_3 .p2align 4, 0x90 .LBB2_7: # %._crit_edge # in Loop: Header=BB2_3 Depth=2 incq %r9 addl %ebp, %r8d cmpq %rax, %r9 je .LBB2_8 .LBB2_3: # %.preheader # Parent Loop BB2_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_4 Depth 3 movl %r8d, %r10d leaq (%rbx,%r10,4), %r10 movq %r9, %r11 imulq %rax, %r11 xorl %r14d, %r14d jmp .LBB2_4 
.p2align 4, 0x90 .LBB2_6: # in Loop: Header=BB2_4 Depth=3 incq %r14 cmpq %r14, %rax je .LBB2_7 .LBB2_4: # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_3 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rsi,%r14,4), %r15d addl (%rdi,%r11,4), %r15d cmpl (%r10,%r14,4), %r15d jge .LBB2_6 # %bb.5: # in Loop: Header=BB2_4 Depth=3 movl %r15d, (%r10,%r14,4) jmp .LBB2_6 .LBB2_9: # %._crit_edge45 addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z18floyd_warshall_cpuPKiiPi, .Lfunc_end2-_Z18floyd_warshall_cpuPKiiPi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI3_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, %r15 movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, %rbx xorl %r12d, %r12d movl $16000000, %edx # imm = 0xF42400 movq %rax, %rdi xorl %esi, %esi callq memset@PLT movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, 32(%rsp) # 8-byte Spill movl $-623191334, %edi # imm = 0xDADADADA callq srand movl $536870911, %ebp # imm = 0x1FFFFFFF movq %r15, %r13 jmp .LBB3_1 .p2align 4, 0x90 .LBB3_5: # %._crit_edge.i # in Loop: Header=BB3_1 Depth=1 incq %r12 addq $8000, %r13 # imm = 0x1F40 cmpq $2000, %r12 # imm = 0x7D0 je .LBB3_6 .LBB3_1: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 xorl %r14d, %r14d jmp 
.LBB3_2 .p2align 4, 0x90 .LBB3_4: # in Loop: Header=BB3_2 Depth=2 movl %eax, (%r13,%r14,4) incq %r14 cmpq $2000, %r14 # imm = 0x7D0 je .LBB3_5 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 xorl %eax, %eax cmpq %r14, %r12 je .LBB3_4 # %bb.3: # in Loop: Header=BB3_2 Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx shll $3, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax cmpl $21, %eax cmovgel %ebp, %eax jmp .LBB3_4 .LBB3_6: # %_Z21generate_random_graphPii.exit movq stderr(%rip), %rcx movl $.L.str, %edi movl $18, %esi movl $1, %edx callq fwrite@PLT xorl %r12d, %r12d movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movl $16000000, %edx # imm = 0xF42400 movq %rbx, %rdi movq %r15, %rsi callq memcpy@PLT movq %rbx, %rax jmp .LBB3_7 .p2align 4, 0x90 .LBB3_13: # %._crit_edge43.i # in Loop: Header=BB3_7 Depth=1 incq %r12 addq $8000, %rax # imm = 0x1F40 cmpq $2000, %r12 # imm = 0x7D0 je .LBB3_14 .LBB3_7: # %.preheader40.i # =>This Loop Header: Depth=1 # Child Loop BB3_8 Depth 2 # Child Loop BB3_9 Depth 3 leaq (%rbx,%r12,4), %rcx movq %rbx, %rdx xorl %esi, %esi jmp .LBB3_8 .p2align 4, 0x90 .LBB3_12: # %._crit_edge.i28 # in Loop: Header=BB3_8 Depth=2 incq %rsi addq $8000, %rdx # imm = 0x1F40 cmpq $2000, %rsi # imm = 0x7D0 je .LBB3_13 .LBB3_8: # %.preheader.i24 # Parent Loop BB3_7 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB3_9 Depth 3 imulq $2000, %rsi, %rdi # imm = 0x7D0 xorl %r8d, %r8d jmp .LBB3_9 .p2align 4, 0x90 .LBB3_11: # in Loop: Header=BB3_9 Depth=3 incq %r8 cmpq $2000, %r8 # imm = 0x7D0 je .LBB3_12 .LBB3_9: # Parent Loop BB3_7 Depth=1 # Parent Loop BB3_8 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rax,%r8,4), %r9d addl (%rcx,%rdi,4), %r9d cmpl (%rdx,%r8,4), %r9d jge .LBB3_11 # %bb.10: # in Loop: Header=BB3_9 Depth=3 movl %r9d, (%rdx,%r8,4) jmp .LBB3_11 .LBB3_14: # %_Z18floyd_warshall_cpuPKiiPi.exit leaq 16(%rsp), %r15 movq %r15, %rdi xorl 
%esi, %esi callq gettimeofday movq 16(%rsp), %rax movq 24(%rsp), %rcx subq (%rsp), %rax subq 8(%rsp), %rcx leaq 1000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cvtsi2ss %rsi, %xmm0 cmovnsq %rcx, %rdx cvtsi2sd %rdx, %xmm1 cvtss2sd %xmm0, %xmm0 divsd .LCPI3_0(%rip), %xmm1 addsd %xmm0, %xmm1 xorps %xmm0, %xmm0 cvtsd2ss %xmm1, %xmm0 movq stderr(%rip), %rdi cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %esi movb $1, %al callq fprintf movq stderr(%rip), %rcx movl $.L.str.2, %edi movl $18, %esi movl $1, %edx callq fwrite@PLT movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq %r15, %rdi xorl %esi, %esi callq gettimeofday movq 16(%rsp), %rax movq 24(%rsp), %rcx subq (%rsp), %rax subq 8(%rsp), %rcx leaq 1000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cmovnsq %rcx, %rdx xorps %xmm0, %xmm0 cvtsi2ss %rsi, %xmm0 cvtss2sd %xmm0, %xmm0 xorps %xmm1, %xmm1 cvtsi2sd %rdx, %xmm1 divsd .LCPI3_0(%rip), %xmm1 addsd %xmm0, %xmm1 xorps %xmm0, %xmm0 cvtsd2ss %xmm1, %xmm0 movq stderr(%rip), %rdi cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %esi movb $1, %al callq fprintf movl $16000000, %edx # imm = 0xF42400 movq %rbx, %rdi movq 32(%rsp), %rsi # 8-byte Reload callq bcmp@PLT testl %eax, %eax jne .LBB3_15 .LBB3_16: xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_15: .cfi_def_cfa_offset 96 movq stderr(%rip), %rcx movl $.L.str.3, %edi movl $6, %esi movl $1, %edx callq fwrite@PLT jmp .LBB3_16 .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "running on cpu...\n" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%f secs\n" .size .L.str.1, 9 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "running on 
gpu...\n" .size .L.str.2, 19 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "FAIL!\n" .size .L.str.3, 7 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0006f9ca_00000000-6_fw_gpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z21generate_random_graphPii .type _Z21generate_random_graphPii, @function _Z21generate_random_graphPii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %r14 movl %esi, %r13d movl $-623191334, %edi call srand@PLT testl %r13d, %r13d jle .L3 movl $0, %r12d movl $0, %ebp movl $536870911, %r15d jmp .L5 .L14: leal (%rbx,%r12), %eax cltq movl $0, (%r14,%rax,4) .L7: leal 1(%rbx), %eax cmpl %eax, %r13d je .L13 movl %eax, %ebx .L9: cmpl %ebx, %ebp je .L14 call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $36, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %ecx sall $3, %ecx subl %ecx, %eax movl %eax, %edx cmpl $21, %eax cmovge %r15d, %edx leal (%rbx,%r12), %eax cltq movl %edx, (%r14,%rax,4) jmp .L7 .L13: leal 1(%rbp), %eax addl %r13d, %r12d cmpl %ebx, %ebp je .L3 movl %eax, %ebp .L5: movl $0, %ebx jmp .L9 .L3: addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z21generate_random_graphPii, .-_Z21generate_random_graphPii .globl 
_Z18floyd_warshall_gpuPKiiPi .type _Z18floyd_warshall_gpuPKiiPi, @function _Z18floyd_warshall_gpuPKiiPi: .LFB2058: .cfi_startproc endbr64 ret .cfi_endproc .LFE2058: .size _Z18floyd_warshall_gpuPKiiPi, .-_Z18floyd_warshall_gpuPKiiPi .globl _Z18floyd_warshall_cpuPKiiPi .type _Z18floyd_warshall_cpuPKiiPi, @function _Z18floyd_warshall_cpuPKiiPi: .LFB2059: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movl %esi, %ebp movq %rdx, %r14 movslq %esi, %rbx movq %rbx, %rdx imulq %rbx, %rdx salq $2, %rdx movq %rdi, %rsi movq %r14, %rdi call memcpy@PLT testl %ebp, %ebp jle .L16 movq %r14, %r12 leaq 0(,%rbx,4), %r9 addq %r9, %r14 movq %rbx, %r10 negq %r10 salq $2, %r10 movl $0, %r11d movl $0, %r13d movl $0, %r15d jmp .L18 .L19: addq $4, %rax cmpq %rcx, %rax je .L25 .L20: movl (%rax,%rdi,4), %edx addl (%rsi), %edx cmpl (%rax), %edx jge .L19 movl %edx, (%rax) jmp .L19 .L25: leal 1(%r8), %eax addq %r9, %rcx addq %r9, %rsi subq %rbx, %rdi cmpl %eax, %ebp je .L21 movl %eax, %r8d .L22: leaq (%rcx,%r10), %rax jmp .L20 .L21: leal 1(%r13), %eax addq $4, %r12 addq %rbx, %r11 cmpl %r13d, %r8d je .L16 movl %eax, %r13d .L18: movq %r11, %rdi movq %r12, %rsi movq %r14, %rcx movl %r15d, %r8d jmp .L22 .L16: addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2059: .size _Z18floyd_warshall_cpuPKiiPi, .-_Z18floyd_warshall_cpuPKiiPi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "running on cpu...\n" .LC2: .string "%f secs\n" .LC3: .string "running on gpu...\n" 
.LC4: .string "FAIL!\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $16000000, %edi call malloc@PLT movq %rax, %r12 movl $16000000, %edi call malloc@PLT movq %rax, %rbx movl $16000000, %edx movl $0, %esi movq %rax, %rdi call memset@PLT movl $16000000, %edi call malloc@PLT movq %rax, %rbp movl $2000, %esi movq %r12, %rdi call _Z21generate_random_graphPii leaq .LC0(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT movq %rbx, %rdx movl $2000, %esi movq %r12, %rdi call _Z18floyd_warshall_cpuPKiiPi leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 16(%rsp), %rdx subq (%rsp), %rdx movq 24(%rsp), %rax subq 8(%rsp), %rax js .L32 .L27: pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssq %rdx, %xmm1 cvtss2sd %xmm1, %xmm1 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT leaq .LC3(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq %rsp, %rdi movl $0, %esi call gettimeofday@PLT leaq 16(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 16(%rsp), %rdx subq (%rsp), %rdx movq 24(%rsp), %rax subq 8(%rsp), %rax js .L33 .L28: pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 divsd .LC1(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssq %rdx, %xmm1 cvtss2sd %xmm1, %xmm1 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC2(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT movl $16000000, %edx movq %rbp, %rsi movq %rbx, %rdi call memcmp@PLT testl %eax, %eax jne .L34 .L29: 
movq 40(%rsp), %rax subq %fs:40, %rax jne .L35 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L32: .cfi_restore_state subq $1, %rdx addq $1000000, %rax jmp .L27 .L33: subq $1, %rdx addq $1000000, %rax jmp .L28 .L34: leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT jmp .L29 .L35: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "fw_gpu.hip" .globl _Z21generate_random_graphPii # -- Begin function _Z21generate_random_graphPii .p2align 4, 0x90 .type _Z21generate_random_graphPii,@function _Z21generate_random_graphPii: # @_Z21generate_random_graphPii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movq %rdi, 16(%rsp) # 8-byte Spill movl $-623191334, %edi # imm = 0xDADADADA callq srand movl %ebx, 12(%rsp) # 4-byte Spill testl %ebx, %ebx jle .LBB0_7 # %bb.1: # %.preheader.lr.ph movl 12(%rsp), %r15d # 4-byte Reload xorl %r12d, %r12d movl $536870911, %ebp # imm = 0x1FFFFFFF xorl %r13d, %r13d jmp .LBB0_2 .p2align 4, 0x90 .LBB0_6: # %._crit_edge # in Loop: Header=BB0_2 Depth=1 incq %r13 addl 12(%rsp), %r12d # 4-byte Folded Reload cmpq %r15, %r13 je .LBB0_7 .LBB0_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_3 Depth 2 movl %r12d, %eax movq 16(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %ebx, %ebx jmp .LBB0_3 .p2align 4, 0x90 .LBB0_5: # in Loop: Header=BB0_3 Depth=2 movl %eax, (%r14,%rbx,4) incq %rbx cmpq %rbx, %r15 je .LBB0_6 .LBB0_3: # Parent Loop BB0_2 Depth=1 # => This Inner Loop Header: Depth=2 xorl %eax, %eax cmpq %rbx, %r13 je .LBB0_5 # %bb.4: # in Loop: Header=BB0_3 Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx shll $3, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax cmpl $21, %eax cmovgel %ebp, %eax jmp .LBB0_5 .LBB0_7: # %._crit_edge21 addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 
popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z21generate_random_graphPii, .Lfunc_end0-_Z21generate_random_graphPii .cfi_endproc # -- End function .globl _Z18floyd_warshall_gpuPKiiPi # -- Begin function _Z18floyd_warshall_gpuPKiiPi .p2align 4, 0x90 .type _Z18floyd_warshall_gpuPKiiPi,@function _Z18floyd_warshall_gpuPKiiPi: # @_Z18floyd_warshall_gpuPKiiPi .cfi_startproc # %bb.0: retq .Lfunc_end1: .size _Z18floyd_warshall_gpuPKiiPi, .Lfunc_end1-_Z18floyd_warshall_gpuPKiiPi .cfi_endproc # -- End function .globl _Z18floyd_warshall_cpuPKiiPi # -- Begin function _Z18floyd_warshall_cpuPKiiPi .p2align 4, 0x90 .type _Z18floyd_warshall_cpuPKiiPi,@function _Z18floyd_warshall_cpuPKiiPi: # @_Z18floyd_warshall_cpuPKiiPi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, %rbx movl %esi, %ebp movq %rdi, %rsi movslq %ebp, %r14 movq %r14, %rdx imulq %r14, %rdx shlq $2, %rdx movq %rbx, %rdi callq memcpy@PLT testl %r14d, %r14d jle .LBB2_9 # %bb.1: # %.preheader40.lr.ph movl %ebp, %eax xorl %ecx, %ecx xorl %edx, %edx jmp .LBB2_2 .p2align 4, 0x90 .LBB2_8: # %._crit_edge43 # in Loop: Header=BB2_2 Depth=1 incq %rdx addl %ebp, %ecx cmpq %rax, %rdx je .LBB2_9 .LBB2_2: # %.preheader40 # =>This Loop Header: Depth=1 # Child Loop BB2_3 Depth 2 # Child Loop BB2_4 Depth 3 movl %ecx, %esi leaq (%rbx,%rsi,4), %rsi leaq (%rbx,%rdx,4), %rdi xorl %r8d, %r8d xorl %r9d, %r9d jmp .LBB2_3 .p2align 4, 0x90 .LBB2_7: # %._crit_edge # in Loop: Header=BB2_3 Depth=2 incq %r9 addl %ebp, %r8d cmpq %rax, %r9 je .LBB2_8 .LBB2_3: # %.preheader # Parent Loop BB2_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_4 Depth 3 movl %r8d, %r10d leaq (%rbx,%r10,4), %r10 movq %r9, %r11 imulq %rax, %r11 xorl %r14d, %r14d jmp .LBB2_4 
.p2align 4, 0x90 .LBB2_6: # in Loop: Header=BB2_4 Depth=3 incq %r14 cmpq %r14, %rax je .LBB2_7 .LBB2_4: # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_3 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rsi,%r14,4), %r15d addl (%rdi,%r11,4), %r15d cmpl (%r10,%r14,4), %r15d jge .LBB2_6 # %bb.5: # in Loop: Header=BB2_4 Depth=3 movl %r15d, (%r10,%r14,4) jmp .LBB2_6 .LBB2_9: # %._crit_edge45 addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z18floyd_warshall_cpuPKiiPi, .Lfunc_end2-_Z18floyd_warshall_cpuPKiiPi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI3_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, %r15 movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, %rbx xorl %r12d, %r12d movl $16000000, %edx # imm = 0xF42400 movq %rax, %rdi xorl %esi, %esi callq memset@PLT movl $16000000, %edi # imm = 0xF42400 callq malloc movq %rax, 32(%rsp) # 8-byte Spill movl $-623191334, %edi # imm = 0xDADADADA callq srand movl $536870911, %ebp # imm = 0x1FFFFFFF movq %r15, %r13 jmp .LBB3_1 .p2align 4, 0x90 .LBB3_5: # %._crit_edge.i # in Loop: Header=BB3_1 Depth=1 incq %r12 addq $8000, %r13 # imm = 0x1F40 cmpq $2000, %r12 # imm = 0x7D0 je .LBB3_6 .LBB3_1: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 xorl %r14d, %r14d jmp 
.LBB3_2 .p2align 4, 0x90 .LBB3_4: # in Loop: Header=BB3_2 Depth=2 movl %eax, (%r13,%r14,4) incq %r14 cmpq $2000, %r14 # imm = 0x7D0 je .LBB3_5 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 xorl %eax, %eax cmpq %r14, %r12 je .LBB3_4 # %bb.3: # in Loop: Header=BB3_2 Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $36, %rcx addl %edx, %ecx shll $3, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax cmpl $21, %eax cmovgel %ebp, %eax jmp .LBB3_4 .LBB3_6: # %_Z21generate_random_graphPii.exit movq stderr(%rip), %rcx movl $.L.str, %edi movl $18, %esi movl $1, %edx callq fwrite@PLT xorl %r12d, %r12d movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movl $16000000, %edx # imm = 0xF42400 movq %rbx, %rdi movq %r15, %rsi callq memcpy@PLT movq %rbx, %rax jmp .LBB3_7 .p2align 4, 0x90 .LBB3_13: # %._crit_edge43.i # in Loop: Header=BB3_7 Depth=1 incq %r12 addq $8000, %rax # imm = 0x1F40 cmpq $2000, %r12 # imm = 0x7D0 je .LBB3_14 .LBB3_7: # %.preheader40.i # =>This Loop Header: Depth=1 # Child Loop BB3_8 Depth 2 # Child Loop BB3_9 Depth 3 leaq (%rbx,%r12,4), %rcx movq %rbx, %rdx xorl %esi, %esi jmp .LBB3_8 .p2align 4, 0x90 .LBB3_12: # %._crit_edge.i28 # in Loop: Header=BB3_8 Depth=2 incq %rsi addq $8000, %rdx # imm = 0x1F40 cmpq $2000, %rsi # imm = 0x7D0 je .LBB3_13 .LBB3_8: # %.preheader.i24 # Parent Loop BB3_7 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB3_9 Depth 3 imulq $2000, %rsi, %rdi # imm = 0x7D0 xorl %r8d, %r8d jmp .LBB3_9 .p2align 4, 0x90 .LBB3_11: # in Loop: Header=BB3_9 Depth=3 incq %r8 cmpq $2000, %r8 # imm = 0x7D0 je .LBB3_12 .LBB3_9: # Parent Loop BB3_7 Depth=1 # Parent Loop BB3_8 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rax,%r8,4), %r9d addl (%rcx,%rdi,4), %r9d cmpl (%rdx,%r8,4), %r9d jge .LBB3_11 # %bb.10: # in Loop: Header=BB3_9 Depth=3 movl %r9d, (%rdx,%r8,4) jmp .LBB3_11 .LBB3_14: # %_Z18floyd_warshall_cpuPKiiPi.exit leaq 16(%rsp), %r15 movq %r15, %rdi xorl 
%esi, %esi callq gettimeofday movq 16(%rsp), %rax movq 24(%rsp), %rcx subq (%rsp), %rax subq 8(%rsp), %rcx leaq 1000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cvtsi2ss %rsi, %xmm0 cmovnsq %rcx, %rdx cvtsi2sd %rdx, %xmm1 cvtss2sd %xmm0, %xmm0 divsd .LCPI3_0(%rip), %xmm1 addsd %xmm0, %xmm1 xorps %xmm0, %xmm0 cvtsd2ss %xmm1, %xmm0 movq stderr(%rip), %rdi cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %esi movb $1, %al callq fprintf movq stderr(%rip), %rcx movl $.L.str.2, %edi movl $18, %esi movl $1, %edx callq fwrite@PLT movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq %r15, %rdi xorl %esi, %esi callq gettimeofday movq 16(%rsp), %rax movq 24(%rsp), %rcx subq (%rsp), %rax subq 8(%rsp), %rcx leaq 1000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cmovnsq %rcx, %rdx xorps %xmm0, %xmm0 cvtsi2ss %rsi, %xmm0 cvtss2sd %xmm0, %xmm0 xorps %xmm1, %xmm1 cvtsi2sd %rdx, %xmm1 divsd .LCPI3_0(%rip), %xmm1 addsd %xmm0, %xmm1 xorps %xmm0, %xmm0 cvtsd2ss %xmm1, %xmm0 movq stderr(%rip), %rdi cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %esi movb $1, %al callq fprintf movl $16000000, %edx # imm = 0xF42400 movq %rbx, %rdi movq 32(%rsp), %rsi # 8-byte Reload callq bcmp@PLT testl %eax, %eax jne .LBB3_15 .LBB3_16: xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_15: .cfi_def_cfa_offset 96 movq stderr(%rip), %rcx movl $.L.str.3, %edi movl $6, %esi movl $1, %edx callq fwrite@PLT jmp .LBB3_16 .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "running on cpu...\n" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%f secs\n" .size .L.str.1, 9 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "running on 
gpu...\n" .size .L.str.2, 19 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "FAIL!\n" .size .L.str.3, 7 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

#define iceil(num, den) (num + den - 1) / den

#define ARRAY_SIZE 20 // must be even; ARRAY_SIZE/2 = number of (x, y) points; sets input and constant-memory size
#define NUM_LINES 4   // report the top NUM_LINES voted bins (scanned top-left to bottom-right of grid space)

/* GRID evaluated for bin voting.
 * Must always be a square grid with the origin at its center. */
#define dimension 5
#define LXBOUND (-1*dimension) // lowest X
#define RXBOUND (dimension)    // highest X
#define LYBOUND (-1*dimension) // lowest Y
#define UYBOUND (dimension)    // highest Y

#define INCREMENT 1 // precision: side length of one square bin; grid length / INCREMENT = bins per side

// Number of bins in one column of the accumulator grid.
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)

__constant__ int d_coordarray[ARRAY_SIZE]; // input coordinates, interleaved (x0, y0, x1, y1, ...), in constant memory

// Print the accumulator grid with its votes.
// Becomes unreadable once the grid exceeds roughly 20x20 bins.
void printVotes(int *h_binarray)
{
    for (int i = 0; i < column; ++i) {
        // Walk one row: consecutive bins of a row are `column` apart in the flat array.
        for (int j = 0; j < column * column; j += column)
            std::cout << h_binarray[i + j] << "\t";
        std::cout << std::endl;
    }
}

// Convert a flat accumulator index into the representative slope magnitude
// (center horizontal value of the bin passed in).
float slopeCalculator(int index)
{
    const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2;
    int displacement = 0;
    int change = column;
    // From the center, widen the search band one column at a time until
    // `index` falls inside it; `displacement` counts the widenings taken.
    while (!(index <= center + change && index >= center - change)) {
        change += column;
        ++displacement;
    }
    return (displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Convert a flat accumulator index into the representative intercept magnitude
// (center vertical value of the bin passed in).
float interceptCalculator(int index)
{
    const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT));
    const int check = index % col; // the displacement shifted into the first column
    float displacement = 0.0f;
    int center1 = column / 2, center2 = column / 2 - 1;
    // Starting at the middle two indices, move one up and one down per step
    // until `check` is found; `displacement` counts the steps taken.
    while (check != center1 && check != center2) {
        displacement += 1.0f;
        ++center1;
        --center2;
    }
    return (float)(displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Report the NUM_LINES highest-voted bins, translating each flat index into a
// signed slope / intercept estimate, then print their averages.
// NOTE: sorts h_binarray in place (descending).
void highest_index(int *h_binarray)
{
    const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT);
    const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT));
    int *index = new int[size];
    for (int i = 0; i < size; ++i)
        index[i] = i;

    // Bubble sort, descending, with early exit; `index` tracks the original
    // accumulator position of each vote count.
    bool stop = true;
    for (int i = 1; (i <= size) && stop; ++i) {
        stop = false;
        for (int j = 0; j < (size - 1); ++j) {
            if (h_binarray[j + 1] > h_binarray[j]) {
                const int temp  = h_binarray[j];
                const int temp2 = index[j];
                h_binarray[j] = h_binarray[j + 1];
                index[j]      = index[j + 1];
                h_binarray[j + 1] = temp;
                index[j + 1]      = temp2;
                stop = true;
            }
        }
    }

    // Use the highest-voted bins for slope & intercept estimates.
    float totalslope = 0.0f, totalintercept = 0.0f;
    for (int i = 0; i < NUM_LINES; ++i) {
        const float slope = slopeCalculator(index[i]);
        const float intercept = interceptCalculator(index[i]);
        std::cout << "[" << i << "]: ";
        // Indices in the lower half of the grid map to negative slopes.
        if (index[i] < (size / 2)) {
            std::cout << "slope= -" << slope << " and " << std::endl;
            totalslope = totalslope - slope;
        } else {
            std::cout << "slope = " << slope << " and " << std::endl;
            totalslope = totalslope + slope;
        }
        // Positions in the first half of a column map to positive intercepts.
        if (index[i] % col < (col / 2)) {
            std::cout << " and intercept = " << intercept << std::endl
                      << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept + intercept;
        } else {
            std::cout << " and intercept = -" << intercept << std::endl
                      << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept - intercept;
        }
        std::cout << "with value = " << h_binarray[i] << std::endl;
    }
    std::cout << "=============" << std::endl;
    std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl;
    std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl;
    std::cout << std::endl;

    delete[] index; // was leaked in the original
}

// Kernel: each thread takes one (x, y) point from constant memory, forms the
// discretized line m = -x, b = y, and casts one vote (atomicAdd) into every
// accumulator bin that line passes through.
__global__ void kernelHough(int size, int* d_binarray)
{
    // One thread per coordinate pair; `thread` indexes the pair's x value.
    const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
    // Slope in discretized space = -x.
    const float slope = -1.0 * d_coordarray[thread];
    // Intercept in discretized space = y.
    const float intercept = d_coordarray[thread + 1];

    int counter = 0; // flat index of the bin currently being tested
    // Sweep the whole grid, column by column (x), top to bottom (y).
    for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
        const float xMin = x;
        const float xMax = x + INCREMENT;
        for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
            const float yMin = y - INCREMENT;
            const float yMax = y;
            // y values the line takes at the bin's two x edges.
            const float lower_range = slope * xMin + intercept;
            const float upper_range = slope * xMax + intercept;
            // Vote if either edge value lands inside the bin's y extent.
            if ((lower_range <= yMax && lower_range >= yMin) ||
                (upper_range <= yMax && upper_range >= yMin))
                atomicAdd(&d_binarray[counter], 1); // protected from race condition
            counter++;
        }
    }
}

// Host driver: stage the coordinates, zero the accumulator, launch the kernel
// (one single-thread block per point), copy the votes back, and report results.
void houghTransform(int* h_input_array, int size)
{
    int *d_binarray;
    int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)];
    // Length of the square grid for bins * size of int.
    const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int);
    const int coordarraysize = size * sizeof(int);

    // Copy coordinates to constant memory.
    cudaMemcpyToSymbol(d_coordarray, h_input_array, coordarraysize);
    cudaMalloc((void**)&d_binarray, binarraysize);
    // Zero the vote counts: cudaMalloc does not initialize memory and the
    // kernel only ever atomicAdd's into the bins (this memset was missing
    // originally, so votes accumulated into uninitialized device memory).
    cudaMemset(d_binarray, 0, binarraysize);

    // 1-D launch: one single-thread block per coordinate pair.
    dim3 myBlockDim(1, 1, 1);
    dim3 myGridDim((size / 2), 1, 1);
    kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray);

    cudaMemcpy(h_binarray, d_binarray, binarraysize, cudaMemcpyDeviceToHost);
    if (INCREMENT >= 0.5) printVotes(h_binarray);
    highest_index(h_binarray);

    cudaFree(d_binarray); // was leaked in the original
    delete[] h_binarray;  // was leaked in the original
}

int main()
{
    // Seed RNG.
    srand(time(0));
    // Test case array: interleaved pairs (1,3), (2,5), ..., (10,21) — i.e. y = 2x + 1.
    int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21 };
    // Random array initializer (alternative input; currently unused by the test run).
    int *random = new int[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        random[i] = (rand() % 10) + 1;
    // Begin test function.
    houghTransform(test, ARRAY_SIZE);
    delete[] random; // was leaked in the original
    return 0;
}
code for sm_80 Function : _Z11kernelHoughiPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R8, RZ, RZ, -0x3f600000 ; /* 0xc0a00000ff087424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */ /* 0x000fc800078e00ff */ /*0070*/ IMAD.SHL.U32 R4, R0, 0x4, RZ ; /* 0x0000000400047824 */ /* 0x000fc800078e00ff */ /*0080*/ LDC.64 R6, c[0x3][R4] ; /* 0x00c0000004067b82 */ /* 0x000e240000000a00 */ /*0090*/ I2F.F64 R2, R6 ; /* 0x0000000600027312 */ /* 0x001e300000201c00 */ /*00a0*/ I2F R0, R7 ; /* 0x0000000700007306 */ /* 0x0002b00000201400 */ /*00b0*/ F2F.F32.F64 R5, R2 ; /* 0x0000000200057310 */ /* 0x0010e40000301000 */ /*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */ /* 0x001fc400078e00ff */ /*00d0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */ /* 0x006fc800078e00ff */ /*00e0*/ FADD R4, R8, 1 ; /* 0x3f80000008047421 */ /* 0x000fe20000000000 */ /*00f0*/ BSSY B0, 0x250 ; /* 0x0000015000007945 */ /* 0x000fe20003800000 */ /*0100*/ FFMA R9, -R5.reuse, R8, R0.reuse ; /* 0x0000000805097223 */ /* 0x148fe40000000100 */ /*0110*/ FFMA R10, -R5, R4, R0 ; /* 0x00000004050a7223 */ /* 0x000fc60000000100 */ /*0120*/ FSETP.GE.AND P0, PT, R9.reuse, 4, PT ; /* 0x408000000900780b */ /* 0x040fe40003f06000 */ /*0130*/ FSETP.GE.AND P4, PT, R10.reuse, 4, PT ; /* 0x408000000a00780b */ /* 0x040fe40003f86000 */ /*0140*/ FSETP.LE.AND P0, PT, R9.reuse, 5, P0 ; /* 0x40a000000900780b */ /* 0x040fe40000703000 
*/ /*0150*/ FSETP.GTU.OR P4, PT, R10, 5, !P4 ; /* 0x40a000000a00780b */ /* 0x000fe4000678c400 */ /*0160*/ FSETP.GE.AND P2, PT, R9.reuse, -1, PT ; /* 0xbf8000000900780b */ /* 0x040fe40003f46000 */ /*0170*/ FSETP.GE.AND P6, PT, R9, 3, PT ; /* 0x404000000900780b */ /* 0x000fc40003fc6000 */ /*0180*/ P2R R6, PR, RZ, 0x4 ; /* 0x00000004ff067803 */ /* 0x000fe40000000000 */ /*0190*/ FSETP.GE.AND P1, PT, R9.reuse, 2, PT ; /* 0x400000000900780b */ /* 0x040fe40003f26000 */ /*01a0*/ FSETP.GE.AND P3, PT, R9.reuse, 1, PT ; /* 0x3f8000000900780b */ /* 0x040fe40003f66000 */ /*01b0*/ FSETP.GE.AND P5, PT, R9, RZ, PT ; /* 0x000000ff0900720b */ /* 0x000fe40003fa6000 */ /*01c0*/ FSETP.GE.AND P2, PT, R10, 3, PT ; /* 0x404000000a00780b */ /* 0x000fe20003f46000 */ /*01d0*/ @P4 BRA !P0, 0x240 ; /* 0x0000006000004947 */ /* 0x000fee0004000000 */ /*01e0*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e220000000000 */ /*01f0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fc400038e0100 */ /*0200*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0210*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x000e6a0008000000 */ /*0220*/ ISETP.EQ.U32.AND P0, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x001fda000bf02070 */ /*0230*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R7 ; /* 0x000000070200098e */ /* 0x0021e4000c10e184 */ /*0240*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0250*/ FSETP.LE.AND P6, PT, R9.reuse, 4, P6 ; /* 0x408000000900780b */ /* 0x040fe200037c3000 */ /*0260*/ BSSY B0, 0x320 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0270*/ FSETP.GTU.OR P2, PT, R10.reuse, 4, !P2 ; /* 0x408000000a00780b */ /* 0x040fe4000574c400 */ /*0280*/ FSETP.GE.AND P4, PT, R9, -2, PT ; /* 0xc00000000900780b */ /* 0x000fe40003f86000 */ /*0290*/ FSETP.GE.AND P0, PT, R10, 2, PT ; /* 0x400000000a00780b */ /* 0x000fd20003f06000 */ /*02a0*/ @P2 BRA !P6, 0x310 ; /* 0x0000006000002947 */ /* 0x000fea0007000000 */ /*02b0*/ S2R R8, SR_LANEID ; /* 
0x0000000000087919 */ /* 0x000e620000000000 */ /*02c0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*02d0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*02e0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*02f0*/ ISETP.EQ.U32.AND P2, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf42070 */ /*0300*/ @P2 RED.E.ADD.STRONG.GPU [R2.64+0x4], R7 ; /* 0x000004070200298e */ /* 0x0011e4000c10e184 */ /*0310*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0320*/ FSETP.LE.AND P1, PT, R9.reuse, 3, P1 ; /* 0x404000000900780b */ /* 0x040fe20000f23000 */ /*0330*/ BSSY B0, 0x3f0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0340*/ FSETP.GTU.OR P6, PT, R10.reuse, 3, !P0 ; /* 0x404000000a00780b */ /* 0x040fe400047cc400 */ /*0350*/ FSETP.GE.AND P2, PT, R9, -3, PT ; /* 0xc04000000900780b */ /* 0x000fe40003f46000 */ /*0360*/ FSETP.GE.AND P0, PT, R10, 1, PT ; /* 0x3f8000000a00780b */ /* 0x000fd20003f06000 */ /*0370*/ @P6 BRA !P1, 0x3e0 ; /* 0x0000006000006947 */ /* 0x000fea0004800000 */ /*0380*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0390*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*03a0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*03b0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*03c0*/ ISETP.EQ.U32.AND P1, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf22070 */ /*03d0*/ @P1 RED.E.ADD.STRONG.GPU [R2.64+0x8], R7 ; /* 0x000008070200198e */ /* 0x0011e4000c10e184 */ /*03e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*03f0*/ FSETP.LE.AND P3, PT, R9.reuse, 2, P3 ; /* 0x400000000900780b */ /* 0x040fe20001f63000 */ /*0400*/ BSSY B0, 0x4c0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0410*/ FSETP.GTU.OR P6, PT, R10, 2, !P0 ; /* 0x400000000a00780b */ /* 0x000fe400047cc400 */ /*0420*/ FSETP.GE.AND P1, PT, R9.reuse, -4, PT ; /* 
0xc08000000900780b */ /* 0x040fe40003f26000 */ /*0430*/ FSETP.GE.AND P0, PT, R9, -5, PT ; /* 0xc0a000000900780b */ /* 0x000fd20003f06000 */ /*0440*/ @P6 BRA !P3, 0x4b0 ; /* 0x0000006000006947 */ /* 0x000fea0005800000 */ /*0450*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0460*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0470*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0480*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0490*/ ISETP.EQ.U32.AND P3, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf62070 */ /*04a0*/ @P3 RED.E.ADD.STRONG.GPU [R2.64+0xc], R7 ; /* 0x00000c070200398e */ /* 0x0011e4000c10e184 */ /*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*04c0*/ FSETP.GE.AND P6, PT, R10.reuse, RZ, PT ; /* 0x000000ff0a00720b */ /* 0x040fe20003fc6000 */ /*04d0*/ BSSY B0, 0x5d0 ; /* 0x000000f000007945 */ /* 0x000fe20003800000 */ /*04e0*/ FSETP.LE.AND P5, PT, R9.reuse, 1, P5 ; /* 0x3f8000000900780b */ /* 0x040fe40002fa3000 */ /*04f0*/ FSETP.GTU.OR P6, PT, R10, 1, !P6 ; /* 0x3f8000000a00780b */ /* 0x000fe400077cc400 */ /*0500*/ FSETP.LE.AND P4, PT, R9, -1, P4 ; /* 0xbf8000000900780b */ /* 0x000fe40002783000 */ /*0510*/ ISETP.NE.AND P3, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f65270 */ /*0520*/ P2R R6, PR, RZ, 0x10 ; /* 0x00000010ff067803 */ /* 0x000fc40000000000 */ /*0530*/ FSETP.LE.AND P3, PT, R9, RZ, P3 ; /* 0x000000ff0900720b */ /* 0x000fe40001f63000 */ /*0540*/ FSETP.GE.AND P4, PT, R10, -1, PT ; /* 0xbf8000000a00780b */ /* 0x000fc60003f86000 */ /*0550*/ @P6 BRA !P5, 0x5c0 ; /* 0x0000006000006947 */ /* 0x000fea0006800000 */ /*0560*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0570*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0580*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0590*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ 
/* 0x001e2a0008000000 */ /*05a0*/ ISETP.EQ.U32.AND P5, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bfa2070 */ /*05b0*/ @P5 RED.E.ADD.STRONG.GPU [R2.64+0x10], R7 ; /* 0x000010070200598e */ /* 0x0011e4000c10e184 */ /*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*05d0*/ FSETP.GTU.OR P6, PT, R10.reuse, RZ, !P4 ; /* 0x000000ff0a00720b */ /* 0x040fe200067cc400 */ /*05e0*/ BSSY B0, 0x6b0 ; /* 0x000000c000007945 */ /* 0x000fe20003800000 */ /*05f0*/ FSETP.GE.AND P5, PT, R10.reuse, -2, PT ; /* 0xc00000000a00780b */ /* 0x040fe40003fa6000 */ /*0600*/ FSETP.LE.AND P2, PT, R9.reuse, -2, P2 ; /* 0xc00000000900780b */ /* 0x040fe40001743000 */ /*0610*/ FSETP.LE.AND P1, PT, R9, -3, P1 ; /* 0xc04000000900780b */ /* 0x000fe40000f23000 */ /*0620*/ FSETP.GE.AND P4, PT, R10, -3, PT ; /* 0xc04000000a00780b */ /* 0x000fca0003f86000 */ /*0630*/ @P6 BRA !P3, 0x6a0 ; /* 0x0000006000006947 */ /* 0x000fea0005800000 */ /*0640*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0650*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0660*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0670*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0680*/ ISETP.EQ.U32.AND P3, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf62070 */ /*0690*/ @P3 RED.E.ADD.STRONG.GPU [R2.64+0x14], R7 ; /* 0x000014070200398e */ /* 0x0011e4000c10e184 */ /*06a0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*06b0*/ ISETP.NE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003fc5270 */ /*06c0*/ BSSY B0, 0x7d0 ; /* 0x0000010000007945 */ /* 0x000fe20003800000 */ /*06d0*/ FSETP.GTU.OR P5, PT, R10.reuse, -1, !P5 ; /* 0xbf8000000a00780b */ /* 0x040fe40006fac400 */ /*06e0*/ FSETP.GTU.OR P4, PT, R10.reuse, -2, !P4 ; /* 0xc00000000a00780b */ /* 0x040fe4000678c400 */ /*06f0*/ FSETP.GE.AND P3, PT, R10.reuse, -4, PT ; /* 0xc08000000a00780b */ /* 0x040fe40003f66000 */ 
/*0700*/ P2R R6, PR, RZ, 0x10 ; /* 0x00000010ff067803 */ /* 0x000fe40000000000 */ /*0710*/ FSETP.GE.AND P4, PT, R10, -5, PT ; /* 0xc0a000000a00780b */ /* 0x000fc40003f86000 */ /*0720*/ FSETP.LE.AND P0, PT, R9, -4, P0 ; /* 0xc08000000900780b */ /* 0x000fe40000703000 */ /*0730*/ FSETP.GTU.OR P3, PT, R10.reuse, -3, !P3 ; /* 0xc04000000a00780b */ /* 0x040fe40005f6c400 */ /*0740*/ FSETP.GTU.OR P4, PT, R10, -4, !P4 ; /* 0xc08000000a00780b */ /* 0x000fe2000678c400 */ /*0750*/ @P5 BRA !P6, 0x7c0 ; /* 0x0000006000005947 */ /* 0x000fee0007000000 */ /*0760*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0770*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0780*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0790*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*07a0*/ ISETP.EQ.U32.AND P5, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bfa2070 */ /*07b0*/ @P5 RED.E.ADD.STRONG.GPU [R2.64+0x18], R7 ; /* 0x000018070200598e */ /* 0x0011e4000c10e184 */ /*07c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*07d0*/ ISETP.NE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003fc5270 */ /*07e0*/ BSSY B0, 0x880 ; /* 0x0000009000007945 */ /* 0x000fe20003800000 */ /*07f0*/ FSETP.GEU.AND P5, PT, R4, 5, PT ; /* 0x40a000000400780b */ /* 0x000fd60003fae000 */ /*0800*/ @P6 BRA !P2, 0x870 ; /* 0x0000006000006947 */ /* 0x000fea0005000000 */ /*0810*/ S2R R6, SR_LANEID ; /* 0x0000000000067919 */ /* 0x000e620000000000 */ /*0820*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0830*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0840*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0850*/ ISETP.EQ.U32.AND P2, PT, R6, UR7, PT ; /* 0x0000000706007c0c */ /* 0x002fda000bf42070 */ /*0860*/ @P2 RED.E.ADD.STRONG.GPU [R2.64+0x1c], R7 ; /* 0x00001c070200298e */ /* 0x0011e4000c10e184 */ /*0870*/ 
BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0880*/ BSSY B0, 0x910 ; /* 0x0000008000007945 */ /* 0x000fe20003800000 */ /*0890*/ @P3 BRA !P1, 0x900 ; /* 0x0000006000003947 */ /* 0x000fea0004800000 */ /*08a0*/ S2R R6, SR_LANEID ; /* 0x0000000000067919 */ /* 0x000e620000000000 */ /*08b0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*08c0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*08d0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*08e0*/ ISETP.EQ.U32.AND P1, PT, R6, UR7, PT ; /* 0x0000000706007c0c */ /* 0x002fda000bf22070 */ /*08f0*/ @P1 RED.E.ADD.STRONG.GPU [R2.64+0x20], R7 ; /* 0x000020070200198e */ /* 0x0011e4000c10e184 */ /*0900*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0910*/ BSSY B0, 0x9b0 ; /* 0x0000009000007945 */ /* 0x000fe20003800000 */ /*0920*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0004 */ /*0930*/ @P4 BRA !P0, 0x9a0 ; /* 0x0000006000004947 */ /* 0x000fea0004000000 */ /*0940*/ S2R R4, SR_LANEID ; /* 0x0000000000047919 */ /* 0x000e620000000000 */ /*0950*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0960*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0970*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0980*/ ISETP.EQ.U32.AND P0, PT, R4, UR7, PT ; /* 0x0000000704007c0c */ /* 0x002fda000bf02070 */ /*0990*/ @P0 RED.E.ADD.STRONG.GPU [R2.64+0x24], R7 ; /* 0x000024070200098e */ /* 0x0011e4000c10e184 */ /*09a0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*09b0*/ IADD3 R2, P0, R2, 0x28, RZ ; /* 0x0000002802027810 */ /* 0x001fca0007f1e0ff */ /*09c0*/ IMAD.X R3, RZ, RZ, R3, P0 ; /* 0x000000ffff037224 */ /* 0x000fe200000e0603 */ /*09d0*/ @!P5 BRA 0xe0 ; /* 0xfffff7000000d947 */ /* 0x000fea000383ffff */ /*09e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*09f0*/ BRA 0x9f0; /* 0xfffffff000007947 */ /* 
0x000fc0000383ffff */ /*0a00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

#define iceil(num, den) (num + den - 1) / den

#define ARRAY_SIZE 20 // must be even; half of it is the point count (also the constant-memory size)
#define NUM_LINES 4   // how many of the top-voted bins to report

/* Voting grid: always a square centered on the origin. */
#define dimension 5
#define LXBOUND (-1*dimension) // lowest X
#define RXBOUND (dimension)    // highest X
#define LYBOUND (-1*dimension) // lowest Y
#define UYBOUND (dimension)    // highest Y

#define INCREMENT 1 // side length of one square bin; grid length / INCREMENT = bins per side

// Bins per column of the flattened accumulator grid.
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)

__constant__ int d_coordarray[ARRAY_SIZE]; // interleaved (x, y) coordinates held in constant memory

// Dump the vote grid; only legible while the grid stays small (<= ~20x20 bins).
void printVotes(int *h_binarray)
{
    int r = 0;
    while (r < column) {
        // A row's bins sit `column` apart in the flat array.
        int c = 0;
        while (c < column * column) {
            std::cout << h_binarray[r + c] << "\t";
            c += column;
        }
        std::cout << std::endl;
        ++r;
    }
}

// Map a flat accumulator index to the center slope value of its bin.
float slopeCalculator(int index)
{
    const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2;
    int steps = 0;
    int band = column;
    // Grow a band around the center one column at a time until it covers index.
    for (;;) {
        if (index <= center + band && index >= center - band)
            break;
        band += column;
        ++steps;
    }
    return (steps * INCREMENT) + (INCREMENT / 2.0);
}

// Map a flat accumulator index to the center intercept value of its bin.
float interceptCalculator(int index)
{
    const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT));
    const int probe = index % col; // the index folded into the first column
    float offset = 0.0f;
    int up = column / 2;
    int down = column / 2 - 1;
    // Fan out from the two middle positions until probe is reached.
    while (probe != up && probe != down) {
        offset += 1.0f;
        ++up;
        --down;
    }
    return (float)(offset * INCREMENT) + (INCREMENT / 2.0);
}

// Print the NUM_LINES most-voted bins as signed slope/intercept pairs, then
// their averages. Sorts h_binarray in place, descending.
void highest_index(int *h_binarray)
{
    const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT);
    const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT));

    int *order = new int[size];
    for (int k = 0; k < size; ++k)
        order[k] = k;

    // Descending bubble sort with an early-exit flag; `order` mirrors the
    // original accumulator position of each vote count.
    bool moved = true;
    for (int pass = 1; (pass <= size) && moved; ++pass) {
        moved = false;
        for (int k = 0; k < (size - 1); ++k) {
            if (h_binarray[k + 1] > h_binarray[k]) {
                const int vote = h_binarray[k];
                const int pos  = order[k];
                h_binarray[k] = h_binarray[k + 1];
                order[k]      = order[k + 1];
                h_binarray[k + 1] = vote;
                order[k + 1]      = pos;
                moved = true;
            }
        }
    }

    float totalslope = 0.0f, totalintercept = 0.0f;
    for (int k = 0; k < NUM_LINES; ++k) {
        const float slope = slopeCalculator(order[k]);
        const float intercept = interceptCalculator(order[k]);
        std::cout << "[" << k << "]: ";
        if (order[k] < (size / 2)) {
            std::cout << "slope= -" << slope << " and " << std::endl;
            totalslope -= slope;
        } else {
            std::cout << "slope = " << slope << " and " << std::endl;
            totalslope += slope;
        }
        if (order[k] % col < (col / 2)) {
            std::cout << " and intercept = " << intercept << std::endl
                      << "From point: " << order[k] << std::endl;
            totalintercept += intercept;
        } else {
            std::cout << " and intercept = -" << intercept << std::endl
                      << "From point: " << order[k] << std::endl;
            totalintercept -= intercept;
        }
        std::cout << "with value = " << h_binarray[k] << std::endl;
    }
    std::cout << "=============" << std::endl;
    std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl;
    std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl;
    std::cout << std::endl;
    // NOTE(review): `order` is never freed here, matching the original code.
}

// Kernel: one thread per (x, y) point; the point's line in discretized space
// is m = -x, b = y, and the thread votes (atomicAdd) into each bin it crosses.
__global__ void kernelHough(int size, int* d_binarray)
{
    // tid indexes this thread's x value in the interleaved coordinate array.
    const int tid = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
    const float m = -1.0 * d_coordarray[tid]; // discretized slope = -x
    const float b = d_coordarray[tid + 1];    // discretized intercept = y

    int bin = 0; // flat accumulator index being tested
    for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
        const float xLo = x;
        const float xHi = x + INCREMENT;
        for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
            const float yLo = y - INCREMENT;
            const float yHi = y;
            const float atXLo = m * xLo + b; // line height at the bin's left edge
            const float atXHi = m * xHi + b; // line height at the bin's right edge
            const bool hitLeft  = (atXLo <= yHi && atXLo >= yLo);
            const bool hitRight = (atXHi <= yHi && atXHi >= yLo);
            if (hitLeft || hitRight)
                atomicAdd(&d_binarray[bin], 1); // race-free vote
            ++bin;
        }
    }
}

// Stage inputs, launch one single-thread block per point, fetch and report votes.
// NOTE(review): d_binarray is not zeroed before the kernel's atomicAdd's, and
// neither device nor host buffer is freed — preserved exactly as in the original.
void houghTransform(int* h_input_array, int size)
{
    int *d_binarray;
    int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)];
    // Accumulator grid byte size and coordinate payload byte size.
    const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int);
    const int coordarraysize = size * sizeof(int);

    cudaMemcpyToSymbol(d_coordarray, h_input_array, coordarraysize);
    cudaMalloc((void**)&d_binarray, binarraysize);

    dim3 blockShape(1, 1, 1);
    dim3 gridShape((size / 2), 1, 1);
    kernelHough <<<gridShape, blockShape>>> (size, d_binarray);

    cudaMemcpy(h_binarray, d_binarray, binarraysize, cudaMemcpyDeviceToHost);
    if (INCREMENT >= 0.5)
        printVotes(h_binarray);
    highest_index(h_binarray);
}

int main()
{
    srand(time(0)); // seed the RNG
    // Fixed test input: interleaved (x, y) pairs.
    int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21 };
    // Randomized alternative input (filled but not passed anywhere).
    int *random = new int[ARRAY_SIZE];
    int i = 0;
    while (i < ARRAY_SIZE) {
        random[i] = (rand() % 10) + 1;
        ++i;
    }
    houghTransform(test, ARRAY_SIZE);
    return 0;
}
# ============================================================================
# Host-side x86-64 assembly (AT&T/GAS syntax, System V AMD64 ABI) emitted by
# gcc 13.3 from nvcc's host pass over kernel.cu (Hough-transform demo).
# NOTE(review): this transcript stores each original multi-statement region on
# one physical line; bytes are preserved verbatim below, only standalone
# `#` comment lines are added between regions.
# This region: .file header; _ZL26__cudaUnregisterBinaryUtilv (atexit hook that
# calls __cudaUnregisterFatBinary on the saved fatbin handle); string ".LC0"="\t";
# and _Z10printVotesPi — printVotes(int*): nested loop over the 10x10 vote grid,
# streaming each int plus "\t" to std::cout via operator<< / __ostream_insert,
# then widening '\n' (ctype widen-init slow path at .L12/.L7) and flushing.
.file "tmpxft_000397ef_00000000-6_kernel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3677: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3677: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\t" .text .globl _Z10printVotesPi .type _Z10printVotesPi, @function _Z10printVotesPi: .LFB3669: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 leaq 400(%rdi), %rbp movl $0, %r14d leaq _ZSt4cout(%rip), %r12 leaq .LC0(%rip), %r13 jmp .L4 .L12: call _ZSt16__throw_bad_castv@PLT .L7: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi .L8: movsbl %sil, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addl $1, %r14d addq $4, %rbp cmpl $10, %r14d je .L3 .L4: leaq -400(%rbp), %rbx .L5: movl (%rbx), %esi movq %r12, %rdi call _ZNSolsEi@PLT movq %rax, %rdi movl $1, %edx movq %r13, %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT addq $40, %rbx cmpq %rbx, %rbp jne .L5 movq (%r12), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %rbx testq %rbx, %rbx je .L12 cmpb $0, 56(%rbx) je .L7 movzbl 67(%rbx), %esi jmp .L8 .L3: popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3669: .size _Z10printVotesPi, .-_Z10printVotesPi .globl 
# _Z15slopeCalculatori — slopeCalculator(int): linear search widening by 10
# (the column width) around center 50; result = displacement + 0.5 (.LC1 is the
# double 0.5; encoded as {0,1071644672}).  _Z19interceptCalculatori —
# interceptCalculator(int): index % 10 via the 1717986919 (2^34/10 reciprocal)
# multiply trick, then walks two centers (5 and 4) apart, accumulating 1.0f
# (.LC4) per step and finally adding 0.5f (.LC3).  Then the .rodata strings
# used by highest_index, and the prologue of _Z13highest_indexPi (saves all
# callee-saved GPRs, per SysV ABI).
_Z15slopeCalculatori .type _Z15slopeCalculatori, @function _Z15slopeCalculatori: .LFB3670: .cfi_startproc endbr64 movl $60, %eax movl $0, %edx movl $100, %esi jmp .L16 .L14: addl $1, %edx addl $10, %eax .L16: cmpl %eax, %edi jg .L14 movl %esi, %ecx subl %eax, %ecx cmpl %edi, %ecx jg .L14 pxor %xmm0, %xmm0 cvtsi2sdl %edx, %xmm0 addsd .LC1(%rip), %xmm0 cvtsd2ss %xmm0, %xmm0 ret .cfi_endproc .LFE3670: .size _Z15slopeCalculatori, .-_Z15slopeCalculatori .globl _Z19interceptCalculatori .type _Z19interceptCalculatori, @function _Z19interceptCalculatori: .LFB3671: .cfi_startproc endbr64 movslq %edi, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %edi, %eax sarl $31, %eax subl %eax, %edx leal (%rdx,%rdx,4), %eax addl %eax, %eax subl %eax, %edi movl %edi, %edx movl $5, %eax pxor %xmm0, %xmm0 movl $9, %esi movss .LC4(%rip), %xmm1 jmp .L20 .L23: addss %xmm1, %xmm0 addl $1, %eax .L20: movl %esi, %ecx subl %eax, %ecx cmpl %edx, %ecx je .L21 cmpl %eax, %edx jne .L23 .L21: addss .LC3(%rip), %xmm0 ret .cfi_endproc .LFE3671: .size _Z19interceptCalculatori, .-_Z19interceptCalculatori .section .rodata.str1.1 .LC5: .string "[" .LC6: .string "]: " .LC7: .string "slope= -" .LC8: .string " and " .LC9: .string "slope = " .LC10: .string " and intercept = " .LC11: .string "From point: " .LC12: .string " and intercept = -" .LC13: .string "with value = " .LC14: .string "=============" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC15: .string "The average of these slopes is: " .align 8 .LC17: .string "The average of these intercept is: " .text .globl _Z13highest_indexPi .type _Z13highest_indexPi, @function _Z13highest_indexPi: .LFB3672: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 
# highest_index body, part 1: operator new[](400) (_Znam) for the 100-entry
# index array, init index[i]=i (.L25), then the bubble sort over both
# h_binarray (r14) and index (rbx) with early-exit "stop" flag (.L26/.L28/.L72).
# NOTE(review): the sort inner loop strides the swap pointer rdx by 40, not 4 —
# mirrors the compiled source as-is; verify against the C++ if votes/index pairs
# ever look desynchronized.  Then the NUM_LINES reporting loop begins (.L29),
# with the "slope = " positive branch at .L30 and ctype widen slow paths.
subq $40, %rsp .cfi_def_cfa_offset 96 movq %rdi, %r14 movl $400, %edi call _Znam@PLT movq %rax, %rbx movl $0, %eax .L25: movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq $100, %rax jne .L25 movl $1, %r10d leaq 400(%r14), %rdi movl $0, %r11d movl $1, %r9d jmp .L26 .L27: addq $4, %rax addq $4, %rdx cmpq %rdi, %rax je .L72 .L28: movl (%rax), %esi movl -4(%rax), %ecx cmpl %ecx, %esi jle .L27 movl -4(%rdx), %r8d movl %esi, -4(%rax) movl (%rdx), %esi movl %esi, -4(%rdx) movl %ecx, (%rax) movl %r8d, (%rdx) movl %r9d, %r8d jmp .L27 .L72: addl $1, %r10d cmpl $100, %r10d jg .L67 testb %r8b, %r8b je .L67 .L26: leaq 4(%r14), %rax leaq 4(%rbx), %rdx movl %r11d, %r8d jmp .L28 .L67: movl $0, %r13d movl $0x00000000, 20(%rsp) movl $0x00000000, 16(%rsp) leaq _ZSt4cout(%rip), %rbp jmp .L29 .L76: call _ZSt16__throw_bad_castv@PLT .L32: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L33 .L30: movl $8, %edx leaq .LC9(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT pxor %xmm0, %xmm0 cvtss2sd (%rsp), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %r15 movq %rax, 24(%rsp) movl $5, %edx leaq .LC8(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%r15), %rax movq -24(%rax), %rax movq 240(%r15,%rax), %r15 testq %r15, %r15 je .L73 cmpb $0, 56(%r15) je .L36 movzbl 67(%r15), %esi .L37: movsbl %sil, %esi movq 24(%rsp), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movss 16(%rsp), %xmm2 addss (%rsp), %xmm2 movss %xmm2, 16(%rsp) jmp .L34 .L73: call _ZSt16__throw_bad_castv@PLT .L36: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L37 .L77: call _ZSt16__throw_bad_castv@PLT .L40: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 
# highest_index body, part 2: more widen-init slow paths, the "with value = "
# output (.L45), the per-iteration loop head (.L29: calls slopeCalculator and
# interceptCalculator on index[i], prints "[" i "]: "), and the negative-slope
# branch ("slope= -", totalslope -= slope at 16(%rsp)).
(%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L41 .L78: call _ZSt16__throw_bad_castv@PLT .L43: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi .L44: movsbl %sil, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movss 20(%rsp), %xmm3 addss 12(%rsp), %xmm3 movss %xmm3, 20(%rsp) .L45: movl $13, %edx leaq .LC13(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl (%r14,%r13,4), %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %r12 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %r15 testq %r15, %r15 je .L74 cmpb $0, 56(%r15) je .L53 movzbl 67(%r15), %esi .L54: movsbl %sil, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $1, %r13 cmpq $4, %r13 je .L75 .L29: movl (%rbx,%r13,4), %r12d movl %r12d, %edi call _Z15slopeCalculatori movss %xmm0, (%rsp) movl %r12d, %edi call _Z19interceptCalculatori movss %xmm0, 12(%rsp) movl $1, %edx leaq .LC5(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r13d, %esi movq %rbp, %rdi call _ZNSolsEi@PLT movq %rax, %rdi movl $3, %edx leaq .LC6(%rip), %rsi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT cmpl $49, %r12d jg .L30 movl $8, %edx leaq .LC7(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT pxor %xmm0, %xmm0 cvtss2sd (%rsp), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %r15 movq %rax, 24(%rsp) movl $5, %edx leaq .LC8(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq (%r15), %rax movq -24(%rax), %rax movq 240(%r15,%rax), %r15 testq %r15, %r15 je .L76 cmpb $0, 56(%r15) je .L32 movzbl 67(%r15), %esi .L33: 
# highest_index body, part 3: totalslope update (.L34), the index%10 test
# (same 1717986919 reciprocal-of-10 trick) choosing " and intercept = " (.LC10,
# adds intercept) vs " and intercept = -" (.LC12 at .L38, subtracts), each
# followed by "From point: " and the point index.
movsbl %sil, %esi movq 24(%rsp), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movss 16(%rsp), %xmm1 subss (%rsp), %xmm1 movss %xmm1, 16(%rsp) .L34: movslq %r12d, %rax imulq $1717986919, %rax, %rax sarq $34, %rax movl %r12d, %edx sarl $31, %edx subl %edx, %eax leal (%rax,%rax,4), %eax addl %eax, %eax movl %r12d, %edx subl %eax, %edx cmpl $4, %edx jg .L38 movl $17, %edx leaq .LC10(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi movq %rax, (%rsp) movq (%rax), %rax movq -24(%rax), %rax movq 240(%rdi,%rax), %r15 testq %r15, %r15 je .L77 cmpb $0, 56(%r15) je .L40 movzbl 67(%r15), %esi .L41: movsbl %sil, %esi movq (%rsp), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq %rax, %r15 movl $12, %edx leaq .LC11(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r12d, %esi movq %r15, %rdi call _ZNSolsEi@PLT movq %rax, %r12 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %r15 testq %r15, %r15 je .L78 cmpb $0, 56(%r15) je .L43 movzbl 67(%r15), %esi jmp .L44 .L38: movl $18, %edx leaq .LC12(%rip), %rsi movq %rbp, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 movq %rbp, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi movq %rax, (%rsp) movq (%rax), %rax movq -24(%rax), %rax movq 240(%rdi,%rax), %r15 testq %r15, %r15 je .L79 cmpb $0, 56(%r15) je .L47 movzbl 67(%r15), %esi .L48: movsbl %sil, %esi movq (%rsp), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq %rax, %r15 movl $12, %edx leaq .LC11(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r12d, %esi movq %r15, %rdi call _ZNSolsEi@PLT movq %rax, %r12 movq 
# highest_index body, part 4: totalintercept subtraction path (.L51), more
# widen-init stubs, then the summary output at .L75: "=============", then
# "The average of these slopes is: " and "...intercept is: " — each total is
# multiplied by .LC16 (0x3E800000 = 0.25f, i.e. /NUM_LINES) before printing.
(%rax), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %r15 testq %r15, %r15 je .L80 cmpb $0, 56(%r15) je .L50 movzbl 67(%r15), %esi .L51: movsbl %sil, %esi movq %r12, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movss 20(%rsp), %xmm4 subss 12(%rsp), %xmm4 movss %xmm4, 20(%rsp) jmp .L45 .L79: call _ZSt16__throw_bad_castv@PLT .L47: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L48 .L80: call _ZSt16__throw_bad_castv@PLT .L50: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L51 .L74: call _ZSt16__throw_bad_castv@PLT .L53: movq %r15, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r15), %rax movl $10, %esi movq %r15, %rdi call *48(%rax) movl %eax, %esi jmp .L54 .L75: movl $13, %edx leaq .LC14(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L81 cmpb $0, 56(%rbx) je .L56 movzbl 67(%rbx), %eax .L57: movsbl %al, %esi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $32, %edx leaq .LC15(%rip), %rsi movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movss 16(%rsp), %xmm0 mulss .LC16(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbp movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbx testq %rbx, %rbx je .L82 cmpb $0, 56(%rbx) je .L59 movzbl 67(%rbx), %eax .L60: movsbl %al, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $35, %edx leaq .LC17(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call 
# highest_index epilogue (final endl, restore r15..rbx, ret) plus its remaining
# bad_cast/widen-init slow paths; then the CUDA kernel-launch machinery:
# _Z32__device_stub__Z11kernelHoughiPiiPi builds the argument array on the
# stack, pops the launch configuration (__cudaPopCallConfiguration) and calls
# cudaLaunchKernel — guarded by a %fs:40 stack canary.
_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movss 20(%rsp), %xmm0 mulss .LC16(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbp movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbp,%rax), %rbx testq %rbx, %rbx je .L83 cmpb $0, 56(%rbx) je .L62 movzbl 67(%rbx), %eax .L63: movsbl %al, %esi movq %rbp, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbx testq %rbx, %rbx je .L84 cmpb $0, 56(%rbx) je .L65 movzbl 67(%rbx), %eax .L66: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L81: .cfi_restore_state call _ZSt16__throw_bad_castv@PLT .L56: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L57 .L82: call _ZSt16__throw_bad_castv@PLT .L59: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L60 .L83: call _ZSt16__throw_bad_castv@PLT .L62: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L63 .L84: call _ZSt16__throw_bad_castv@PLT .L65: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L66 .cfi_endproc .LFE3672: .size _Z13highest_indexPi, .-_Z13highest_indexPi .globl _Z32__device_stub__Z11kernelHoughiPiiPi .type _Z32__device_stub__Z11kernelHoughiPiiPi, @function _Z32__device_stub__Z11kernelHoughiPiiPi: .LFB3699: .cfi_startproc endbr64 subq $120, %rsp 
# Device-stub body continued (arg marshalling, grid/block dims defaulted to 1,
# cudaLaunchKernel on success path .L89, __stack_chk_fail on canary mismatch);
# _Z11kernelHoughiPi host trampoline that tail-delegates to the stub; and the
# start of _Z14houghTransformPii: operator new[](400) for h_binarray,
# cudaMemcpyToSymbol into _ZL12d_coordarray, cudaMalloc of the device bins.
.cfi_def_cfa_offset 128 movl %edi, 12(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 12(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L89 .L85: movq 104(%rsp), %rax subq %fs:40, %rax jne .L90 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L89: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11kernelHoughiPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L85 .L90: call __stack_chk_fail@PLT .cfi_endproc .LFE3699: .size _Z32__device_stub__Z11kernelHoughiPiiPi, .-_Z32__device_stub__Z11kernelHoughiPiiPi .globl _Z11kernelHoughiPi .type _Z11kernelHoughiPi, @function _Z11kernelHoughiPi: .LFB3700: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z11kernelHoughiPiiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3700: .size _Z11kernelHoughiPi, .-_Z11kernelHoughiPi .globl _Z14houghTransformPii .type _Z14houghTransformPii, @function _Z14houghTransformPii: .LFB3673: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movl %esi, %ebx movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $400, %edi call _Znam@PLT movq %rax, %rbp leal 0(,%rbx,4), %edx movslq %edx, %rdx movl $1, %r8d movl $0, %ecx movq %r12, %rsi leaq _ZL12d_coordarray(%rip), %rdi call cudaMemcpyToSymbol@PLT leaq 8(%rsp), %rdi movl $400, %esi call cudaMalloc@PLT movl $1, 16(%rsp) 
# houghTransform continued: builds dim3 grid (size/2 via shrl/sarl, 1, 1) and
# block (1,1,1), __cudaPushCallConfiguration, kernel launch via the stub on the
# zero-return path (.L97), then cudaMemcpy device->host (kind 2), printVotes,
# highest_index, canary check, return.  Then main: srand(time(0)), the 20-entry
# test array stored as immediates, operator new[](80) for the random array
# filled with rand()%10+1 (reciprocal-of-10 sequence), houghTransform(test,20).
movl $1, 20(%rsp) movl %ebx, %eax shrl $31, %eax addl %ebx, %eax sarl %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movl $1, %ecx movq 28(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L97 .L94: movl $2, %ecx movl $400, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movq %rbp, %rdi call _Z10printVotesPi movq %rbp, %rdi call _Z13highest_indexPi movq 40(%rsp), %rax subq %fs:40, %rax jne .L98 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L97: .cfi_restore_state movq 8(%rsp), %rsi movl %ebx, %edi call _Z32__device_stub__Z11kernelHoughiPiiPi jmp .L94 .L98: call __stack_chk_fail@PLT .cfi_endproc .LFE3673: .size _Z14houghTransformPii, .-_Z14houghTransformPii .globl main .type main, @function main: .LFB3674: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $104, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl $1, (%rsp) movl $3, 4(%rsp) movl $2, 8(%rsp) movl $5, 12(%rsp) movl $3, 16(%rsp) movl $7, 20(%rsp) movl $4, 24(%rsp) movl $9, 28(%rsp) movl $5, 32(%rsp) movl $11, 36(%rsp) movl $6, 40(%rsp) movl $13, 44(%rsp) movl $7, 48(%rsp) movl $15, 52(%rsp) movl $8, 56(%rsp) movl $17, 60(%rsp) movl $9, 64(%rsp) movl $19, 68(%rsp) movl $10, 72(%rsp) movl $21, 76(%rsp) movl $80, %edi call _Znam@PLT movq %rax, %rbx leaq 80(%rax), %rbp .L100: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax addl $1, %eax movl %eax, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L100 movq %rsp, %rdi movl $20, %esi call _Z14houghTransformPii movq 88(%rsp), %rax subq %fs:40, %rax jne .L104 
# main epilogue (return 0, canary check); CUDA module registration:
# _ZL24__sti____cudaRegisterAllv (an .init_array constructor) registers the
# fatbin (_ZL15__fatDeviceText, magic 1180844977 = 0x466243b1), the kernel
# "_Z11kernelHoughiPi" and the 80-byte __constant__ symbol "d_coordarray",
# then installs the unregister hook via atexit.  Also the .nvFatBinSegment
# descriptor, BSS for the fatbin handle and d_coordarray shadow, and FP
# constants: .LC1 = double 0.5, .LC3 = float 0.5f.
movl $0, %eax addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L104: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE3674: .size main, .-main .section .rodata.str1.1 .LC18: .string "_Z11kernelHoughiPi" .LC19: .string "d_coordarray" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3702: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z11kernelHoughiPi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $80, %r9d movl $0, %r8d leaq .LC19(%rip), %rdx movq %rdx, %rcx leaq _ZL12d_coordarray(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3702: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL12d_coordarray .comm _ZL12d_coordarray,80,32 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1071644672 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC3: .long 1056964608 
# Remaining literal pool (.LC4 = 1065353216 = 1.0f, .LC16 = 1048576000 = 0.25f),
# compiler ident, non-executable-stack marker, and the GNU property note
# advertising IBT/SHSTK (matches the endbr64 landing pads above).
.align 4 .LC4: .long 1065353216 .align 4 .LC16: .long 1048576000 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

// Ceiling division helper.
#define iceil(num, den) (num + den - 1) / den

#define ARRAY_SIZE 20 // must be an even number; ARRAY_SIZE/2 = number of (x, y) points
                      // sets random array and constant memory size
#define NUM_LINES 4   // report the top NUM_LINES most-voted lines

/* GRID evaluated for bin voting.
 * Must always be a square grid with origin at the center. */
#define dimension 5
#define LXBOUND (-1*dimension) // lowest X
#define RXBOUND (dimension)    // highest X
#define LYBOUND (-1*dimension) // lowest Y
#define UYBOUND (dimension)    // highest Y

#define INCREMENT 1 // precision: side length of one square bin
// The (abs) difference between two bounds is the length of the grid;
// length / INCREMENT determines how many bins per side.
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)

// Input coordinates live in constant memory so every thread can read them.
__constant__ int d_coordarray[ARRAY_SIZE];

// Print the vote grid; becomes unreadable once bins exceed roughly 20x20.
void printVotes(int *h_binarray)
{
    for (int i = 0; i < column; ++i) {
        for (int j = 0; j < column * column; j += column)
            std::cout << h_binarray[i + j] << "\t";
        std::cout << std::endl;
    }
}

// Convert an accumulator index into the representative slope magnitude.
// Widens a search band around the central bin, one column-width per step.
float slopeCalculator(int index)
{
    const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2;
    int displacement = 0, flag = 0;
    int change = column;
    while (flag == 0) {
        if (index <= center + change && index >= center - change) {
            flag++; // index falls inside the current band -> stop
        } else {
            change += column;
            displacement++;
        }
    }
    // Center of the matched bin along the slope axis.
    return (displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Convert an accumulator index into the representative intercept magnitude.
float interceptCalculator(int index)
{
    const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT));
    const int check = index % col; // displacement shifted into the first column
    float displacement = 0.0, flag = 0; // float types kept as in the original contract
    int center1 = column / 2, center2 = column / 2 - 1;
    // Starting at the middle two indices, move outward until check is found.
    while (flag == 0) {
        if (check == center1 || check == center2)
            flag++;
        else
            displacement++;
        center1++;
        center2--;
    }
    // Center of the matched bin along the intercept axis.
    return (float)(displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Find the NUM_LINES highest-voted bins and report their line parameters.
void highest_index(int *h_binarray)
{
    const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT);
    const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT));
    int *index = new int[size];
    for (int i = 0; i < size; ++i)
        index[i] = i;

    // Bubble sort (descending) with early exit, carrying the original
    // bin indices alongside the vote counts.
    bool stop = true;
    int temp, temp2;
    for (int i = 1; (i <= size) && stop; ++i) {
        stop = false;
        for (int j = 0; j < (size - 1); ++j) {
            if (h_binarray[j + 1] > h_binarray[j]) {
                temp = h_binarray[j];
                temp2 = index[j];
                h_binarray[j] = h_binarray[j + 1];
                index[j] = index[j + 1];
                h_binarray[j + 1] = temp;
                index[j + 1] = temp2;
                stop = true;
            }
        }
    }

    // Use the highest-voted bins to accumulate slope & intercept.
    float totalslope = 0.0, totalintercept = 0.0;
    for (int i = 0; i < NUM_LINES; ++i) {
        const float slope = slopeCalculator(index[i]);
        const float intercept = interceptCalculator(index[i]);
        std::cout << "[" << i << "]: ";
        if (index[i] < (size / 2)) {
            std::cout << "slope= -" << slope << " and " << std::endl;
            totalslope = totalslope - slope;
        } else {
            std::cout << "slope = " << slope << " and " << std::endl;
            totalslope = totalslope + slope;
        }
        if (index[i] % col < (col / 2)) {
            std::cout << " and intercept = " << intercept << std::endl << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept + intercept;
        } else {
            std::cout << " and intercept = -" << intercept << std::endl << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept - intercept;
        }
        std::cout << "with value = " << h_binarray[i] << std::endl;
    }
    std::cout << "=============" << std::endl;
    std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl;
    std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl;
    std::cout << std::endl;

    delete[] index; // FIX: was leaked
}

// Kernel: each thread owns one (x, y) point, discretized as slope = -x,
// intercept = y, and votes into every bin its line passes through.
__global__ void kernelHough(int size, int* d_binarray)
{
    // Thread index maps to coordinate pair 2*t (x) and 2*t+1 (y).
    const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
    const float slope = -1.0 * d_coordarray[thread];      // slope in discretized space = -x
    const float intercept = d_coordarray[thread + 1];     // intercept in discretized space = y

    int counter = 0; // current accumulator index being checked
    // Sweep the whole parameter grid one bin at a time.
    for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
        const float xMin = x;
        const float xMax = x + INCREMENT;
        for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
            const float yMin = y - INCREMENT;
            const float yMax = y;
            // Possible y range associated with the known x range.
            const float lower_range = slope * xMin + intercept;
            const float upper_range = slope * xMax + intercept;
            // If the line crosses this bin's y range, vote for the bin.
            if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin))
                atomicAdd(&d_binarray[counter], 1); // race-free increment
            counter++;
        }
    }
}

// Host driver: uploads coordinates, runs the voting kernel, reports results.
void houghTransform(int* h_input_array, int size)
{
    int *d_binarray;
    int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)];
    // Length of the square bin grid * sizeof(int).
    const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int);
    const int coordarraysize = size * sizeof(int);

    // Copy coordinates to constant memory.
    cudaMemcpyToSymbol(d_coordarray, h_input_array, coordarraysize);
    cudaMalloc((void**)&d_binarray, binarraysize);
    // BUG FIX: cudaMalloc does not zero device memory, and the kernel only
    // ever atomicAdd()s into the bins — they must start at 0 or the vote
    // counts are garbage.
    cudaMemset(d_binarray, 0, binarraysize);

    // 1-D block of one thread; 1-D grid of size/2 blocks (one per point).
    dim3 myBlockDim(1, 1, 1);
    dim3 myGridDim((size/2), 1, 1);
    kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray);

    cudaMemcpy(h_binarray, d_binarray, binarraysize, cudaMemcpyDeviceToHost);
    if (INCREMENT >= 0.5) printVotes(h_binarray);
    highest_index(h_binarray);

    // FIX: release device and host buffers (previously leaked).
    cudaFree(d_binarray);
    delete[] h_binarray;
}

int main()
{
    // Seed RNG.
    srand(time(0));

    // Test case array: pairs (x, y) lying on y = 2x + 1.
    int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21 };

    // Random array initializer (kept so the RNG stream is unchanged).
    int *random = new int[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        random[i] = (rand() % 10) + 1;

    // Begin test function.
    houghTransform(test, ARRAY_SIZE);

    delete[] random; // FIX: was leaked
    return 0;
}
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

// Ceiling division helper.
#define iceil(num, den) (num + den - 1) / den

#define ARRAY_SIZE 20 // must be an even number; ARRAY_SIZE/2 = number of (x, y) points
                      // sets random array and constant memory size
#define NUM_LINES 4   // report the top NUM_LINES most-voted lines

/* GRID evaluated for bin voting.
 * Must always be a square grid with origin at the center. */
#define dimension 5
#define LXBOUND (-1*dimension) // lowest X
#define RXBOUND (dimension)    // highest X
#define LYBOUND (-1*dimension) // lowest Y
#define UYBOUND (dimension)    // highest Y

#define INCREMENT 1 // precision: side length of one square bin
// The (abs) difference between two bounds is the length of the grid;
// length / INCREMENT determines how many bins per side.
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)

// Input coordinates live in constant memory so every thread can read them.
__constant__ int d_coordarray[ARRAY_SIZE];

// Print the vote grid; becomes unreadable once bins exceed roughly 20x20.
void printVotes(int *h_binarray)
{
    for (int i = 0; i < column; ++i) {
        for (int j = 0; j < column * column; j += column)
            std::cout << h_binarray[i + j] << "\t";
        std::cout << std::endl;
    }
}

// Convert an accumulator index into the representative slope magnitude.
// Widens a search band around the central bin, one column-width per step.
float slopeCalculator(int index)
{
    const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2;
    int displacement = 0, flag = 0;
    int change = column;
    while (flag == 0) {
        if (index <= center + change && index >= center - change) {
            flag++; // index falls inside the current band -> stop
        } else {
            change += column;
            displacement++;
        }
    }
    // Center of the matched bin along the slope axis.
    return (displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Convert an accumulator index into the representative intercept magnitude.
float interceptCalculator(int index)
{
    const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT));
    const int check = index % col; // displacement shifted into the first column
    float displacement = 0.0, flag = 0; // float types kept as in the original contract
    int center1 = column / 2, center2 = column / 2 - 1;
    // Starting at the middle two indices, move outward until check is found.
    while (flag == 0) {
        if (check == center1 || check == center2)
            flag++;
        else
            displacement++;
        center1++;
        center2--;
    }
    // Center of the matched bin along the intercept axis.
    return (float)(displacement * INCREMENT) + (INCREMENT / 2.0);
}

// Find the NUM_LINES highest-voted bins and report their line parameters.
void highest_index(int *h_binarray)
{
    const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT);
    const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT));
    int *index = new int[size];
    for (int i = 0; i < size; ++i)
        index[i] = i;

    // Bubble sort (descending) with early exit, carrying the original
    // bin indices alongside the vote counts.
    bool stop = true;
    int temp, temp2;
    for (int i = 1; (i <= size) && stop; ++i) {
        stop = false;
        for (int j = 0; j < (size - 1); ++j) {
            if (h_binarray[j + 1] > h_binarray[j]) {
                temp = h_binarray[j];
                temp2 = index[j];
                h_binarray[j] = h_binarray[j + 1];
                index[j] = index[j + 1];
                h_binarray[j + 1] = temp;
                index[j + 1] = temp2;
                stop = true;
            }
        }
    }

    // Use the highest-voted bins to accumulate slope & intercept.
    float totalslope = 0.0, totalintercept = 0.0;
    for (int i = 0; i < NUM_LINES; ++i) {
        const float slope = slopeCalculator(index[i]);
        const float intercept = interceptCalculator(index[i]);
        std::cout << "[" << i << "]: ";
        if (index[i] < (size / 2)) {
            std::cout << "slope= -" << slope << " and " << std::endl;
            totalslope = totalslope - slope;
        } else {
            std::cout << "slope = " << slope << " and " << std::endl;
            totalslope = totalslope + slope;
        }
        if (index[i] % col < (col / 2)) {
            std::cout << " and intercept = " << intercept << std::endl << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept + intercept;
        } else {
            std::cout << " and intercept = -" << intercept << std::endl << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept - intercept;
        }
        std::cout << "with value = " << h_binarray[i] << std::endl;
    }
    std::cout << "=============" << std::endl;
    std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl;
    std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl;
    std::cout << std::endl;

    delete[] index; // FIX: was leaked
}

// Kernel: each thread owns one (x, y) point, discretized as slope = -x,
// intercept = y, and votes into every bin its line passes through.
__global__ void kernelHough(int size, int* d_binarray)
{
    // Thread index maps to coordinate pair 2*t (x) and 2*t+1 (y).
    const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
    const float slope = -1.0 * d_coordarray[thread];      // slope in discretized space = -x
    const float intercept = d_coordarray[thread + 1];     // intercept in discretized space = y

    int counter = 0; // current accumulator index being checked
    // Sweep the whole parameter grid one bin at a time.
    for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
        const float xMin = x;
        const float xMax = x + INCREMENT;
        for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
            const float yMin = y - INCREMENT;
            const float yMax = y;
            // Possible y range associated with the known x range.
            const float lower_range = slope * xMin + intercept;
            const float upper_range = slope * xMax + intercept;
            // If the line crosses this bin's y range, vote for the bin.
            if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin))
                atomicAdd(&d_binarray[counter], 1); // race-free increment
            counter++;
        }
    }
}

// Host driver: uploads coordinates, runs the voting kernel, reports results.
void houghTransform(int* h_input_array, int size)
{
    int *d_binarray;
    int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)];
    // Length of the square bin grid * sizeof(int).
    const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int);
    const int coordarraysize = size * sizeof(int);

    // Copy coordinates to constant memory (HIP_SYMBOL wraps the symbol name).
    hipMemcpyToSymbol(HIP_SYMBOL(d_coordarray), h_input_array, coordarraysize);
    hipMalloc((void**)&d_binarray, binarraysize);
    // BUG FIX: hipMalloc does not zero device memory, and the kernel only
    // ever atomicAdd()s into the bins — they must start at 0 or the vote
    // counts are garbage.
    hipMemset(d_binarray, 0, binarraysize);

    // 1-D block of one thread; 1-D grid of size/2 blocks (one per point).
    dim3 myBlockDim(1, 1, 1);
    dim3 myGridDim((size/2), 1, 1);
    kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray);

    hipMemcpy(h_binarray, d_binarray, binarraysize, hipMemcpyDeviceToHost);
    if (INCREMENT >= 0.5) printVotes(h_binarray);
    highest_index(h_binarray);

    // FIX: release device and host buffers (previously leaked).
    hipFree(d_binarray);
    delete[] h_binarray;
}

int main()
{
    // Seed RNG.
    srand(time(0));

    // Test case array: pairs (x, y) lying on y = 2x + 1.
    int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21 };

    // Random array initializer (kept so the RNG stream is unchanged).
    int *random = new int[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        random[i] = (rand() % 10) + 1;

    // Begin test function.
    houghTransform(test, ARRAY_SIZE);

    delete[] random; // FIX: was leaked
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <iostream> #define iceil(num, den) (num + den - 1) / den #define ARRAY_SIZE 20 //must be an even number; this number/2 = number of points //sets random array and constant mem size //#define BIN 100 //divides the grid into square bins to vote on. perfect square value #define NUM_LINES 4 //top X voted lines. Picks first X Largest from top left to bottom right of grid space. /*GRID evaluated for bin voting * Must always be a square grid with origin at center */ #define dimension 5 #define LXBOUND (-1*dimension) //lowest X #define RXBOUND (dimension) //highest X #define LYBOUND (-1*dimension) //lowest Y #define UYBOUND (dimension) //highest Y //////////////////////////////// #define INCREMENT 1 //precision, length of 1 side of the square(bin) //The (abs)difference between between two sides is the length of the grid. Length/Increment determines how many bins #define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT) __constant__ int d_coordarray[ARRAY_SIZE];//Place coordinates in constant memory //show grid with votes. 
Becomes unuseful when bins > 20x20 void printVotes(int *h_binarray) { // Number of columns for (int i = 0; i < column; ++i) { for (int j = 0; j < column * column; j += column) std::cout << h_binarray[i + j] << "\t"; std::cout << std::endl; } } // Convert from array index to representative slope float slopeCalculator(int index) { const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2; int displacement = 0, flag = 0; int change = column; //from the center, compare columns incrementing by column length until index is found while (flag == 0) { if (index <= center + change && index >= center - change) { flag++; } else { change += column; displacement++; } } //gives the center horizontal value for the bin passed to this function return (displacement * INCREMENT) + (INCREMENT / 2.0); } // Convert from array index to representative intercept float interceptCalculator(int index) { const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)); const int check = index % col;//represents the displacement shifted into the first column float displacement = 0.0, flag = 0; int center1 = column / 2, center2 = column / 2 - 1; //starting at middle 2 indices move up one and down one until check is found while (flag == 0) { ((check == center1 || check == center2) ? 
flag : displacement)++; center1++; center2--; } //gives the center,vertical value for the bin passed to this function return (float)(displacement * INCREMENT) + (INCREMENT / 2.0); } // Find n highest indexes in the array void highest_index(int *h_binarray) { const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT); const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT)); int *index = new int[size]; for (int i = 0; i < size; ++i) index[i] = i; bool stop = true; int temp, temp2; // Bubble sort for (int i = 1; (i <= size) && stop; ++i) { stop = false; for (int j = 0; j < (size - 1); ++j) { if (h_binarray[j + 1] > h_binarray[j]) { temp = h_binarray[j]; temp2 = index[j]; h_binarray[j] = h_binarray[j + 1]; index[j] = index[j + 1]; h_binarray[j + 1] = temp; index[j + 1] = temp2; stop = true; } } } //use highest values for slope & intercept float totalslope = 0.0, totalintercept = 0.0; for (int i = 0; i < NUM_LINES; ++i) { const float slope = slopeCalculator(index[i]); const float intercept = interceptCalculator(index[i]); std::cout << "[" << i << "]: "; if (index[i] < (size / 2)) { std::cout << "slope= -" << slope << " and " << std::endl; totalslope = totalslope - slope; } else { std::cout << "slope = " << slope << " and " << std::endl; totalslope = totalslope + slope; } if (index[i] % col < (col / 2)) { std::cout << " and intercept = " << intercept << std::endl << "From point: " << index[i] << std::endl; totalintercept = totalintercept + intercept; } else { std::cout << " and intercept = -" << intercept << std::endl << "From point: " << index[i] << std::endl; totalintercept = totalintercept - intercept; } std::cout << "with value = " << h_binarray[i] << std::endl; } std::cout << "=============" << std::endl; std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl; std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl; std::cout << 
std::endl; } //kernel functions __global__ void kernelHough(int size, int* d_binarray) { /* take a piece of the array. discretize into y=mx+b format per point. check all points and increment all bins touched at the end recombine all shared memory to a global bin tally. Take the most significant X numbers as lines. discretized from point(1,1) ==(m,n)==> (-1,1) check each bin for count and sum them to a global array in sync NUM of coordinates will check all bins for their own equation and increment appropriately */ // Number from 0 through arraysize / 2 const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x); // Slope is discretized space = -x const float slope = -1.0 * d_coordarray[thread]; // Intercept in discretized space = y const float intercept = d_coordarray[thread + 1]; int counter = 0;//keeps current array index being checked //loop through entire graph for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) { const float xMin = x; const float xMax = x + INCREMENT; for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) { const float yMin = y - INCREMENT; const float yMax = y; //calculates possible y range associated with the known x range const float lower_range = slope * xMin + intercept; const float upper_range = slope * xMax + intercept; //if the possible y ranges corresponding to the x values exist within the actual y range increment bin if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin)) atomicAdd(&d_binarray[counter], 1);//increment bin, protected from race condition counter++; } } } //prep function void houghTransform(int* h_input_array, int size) { int *d_binarray; int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)]; // Length of the square grid for bins * size of int const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int); const int coordarraysize = size * sizeof(int); // Copy coordinates to Constant 
Memory hipMemcpyToSymbol(HIP_SYMBOL(d_coordarray), h_input_array, coordarraysize); hipMalloc((void**)&d_binarray, binarraysize); // 1-D Block dim3 myBlockDim(1, 1, 1); // ((size / 2), 1, 1); 1d grid dim3 myGridDim((size/2), 1, 1); kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray); hipMemcpy(h_binarray, d_binarray, binarraysize, hipMemcpyDeviceToHost); if(INCREMENT>=0.5)printVotes(h_binarray); highest_index(h_binarray); } int main() { // Seed RNG srand(time(0)); // Test case array int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21}; // Random array initializer int *random = new int[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; ++i) random[i] = (rand() % 10) + 1; // Begin test function houghTransform(test, ARRAY_SIZE); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11kernelHoughiPi .globl _Z11kernelHoughiPi .p2align 8 .type _Z11kernelHoughiPi,@function _Z11kernelHoughiPi: s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) s_mul_i32 s15, s15, s2 s_getpc_b64 s[2:3] s_add_u32 s2, s2, d_coordarray@rel32@lo+4 s_addc_u32 s3, s3, d_coordarray@rel32@hi+12 v_add_lshl_u32 v0, s15, v0, 1 v_or_b32_e32 v2, 1, v0 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[2:3] v_add_co_u32 v0, vcc_lo, v0, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v2, vcc_lo, v2, s2 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo s_load_b64 s[2:3], s[0:1], 0x8 s_mov_b32 s1, 0 s_clause 0x1 global_load_b32 v0, v[0:1], off global_load_b32 v2, v[2:3], off v_mov_b32_e32 v1, 0xc0a00000 v_mov_b32_e32 v3, 0 s_waitcnt vmcnt(1) v_cvt_f32_i32_e32 v0, v0 s_waitcnt vmcnt(0) v_cvt_f32_i32_e32 v2, v2 s_branch .LBB0_2 .LBB0_1: s_set_inst_prefetch_distance 0x2 v_cmp_gt_f32_e32 vcc_lo, 0x40a00000, v1 s_add_i32 s1, s1, s4 s_cbranch_vccz .LBB0_10 .LBB0_2: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v7, 0x40a00000 s_mov_b32 s4, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_f32_e32 v1, 1.0, v4 v_fma_f32 v4, -v4, v0, v2 v_fma_f32 v5, -v1, v0, v2 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f32_e32 v6, -1.0, v7 v_cmp_nle_f32_e32 vcc_lo, v4, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_nge_f32_e64 s0, v4, v6 s_or_b32 s0, vcc_lo, s0 s_delay_alu 
instid0(SALU_CYCLE_1) s_xor_b32 s5, s0, -1 s_and_saveexec_b32 s6, s0 v_cmp_le_f32_e32 vcc_lo, v5, v7 v_cmp_ge_f32_e64 s0, v5, v6 s_and_not1_b32 s5, s5, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, vcc_lo, s0 s_and_b32 s0, s0, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s5, s5, s0 s_or_b32 exec_lo, exec_lo, s6 s_and_saveexec_b32 s0, s5 s_cbranch_execz .LBB0_8 s_mov_b32 s5, exec_lo s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mbcnt_lo_u32_b32 v7, s5, 0 v_cmp_eq_u32_e32 vcc_lo, 0, v7 s_and_b32 s6, exec_lo, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_mov_b32 exec_lo, s6 s_cbranch_execz .LBB0_8 s_add_i32 s6, s1, s4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s7, s6, 31 s_lshl_b64 s[6:7], s[6:7], 2 s_waitcnt lgkmcnt(0) s_add_u32 s6, s2, s6 s_addc_u32 s7, s3, s7 s_bcnt1_i32_b32 s5, s5 s_delay_alu instid0(SALU_CYCLE_1) v_mov_b32_e32 v7, s5 global_atomic_add_u32 v3, v7, s[6:7] .LBB0_8: s_or_b32 exec_lo, exec_lo, s0 s_add_i32 s4, s4, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s4, 10 s_cbranch_scc1 .LBB0_1 v_mov_b32_e32 v7, v6 s_branch .LBB0_3 .LBB0_10: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11kernelHoughiPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11kernelHoughiPi, .Lfunc_end0-_Z11kernelHoughiPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected d_coordarray .type d_coordarray,@object .section .bss,"aw",@nobits .globl d_coordarray .p2align 4, 0x0 d_coordarray: .zero 80 .size d_coordarray, 80 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym d_coordarray .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - 
.offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11kernelHoughiPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11kernelHoughiPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <iostream> #define iceil(num, den) (num + den - 1) / den #define ARRAY_SIZE 20 //must be an even number; this number/2 = number of points //sets random array and constant mem size //#define BIN 100 //divides the grid into square bins to vote on. perfect square value #define NUM_LINES 4 //top X voted lines. Picks first X Largest from top left to bottom right of grid space. /*GRID evaluated for bin voting * Must always be a square grid with origin at center */ #define dimension 5 #define LXBOUND (-1*dimension) //lowest X #define RXBOUND (dimension) //highest X #define LYBOUND (-1*dimension) //lowest Y #define UYBOUND (dimension) //highest Y //////////////////////////////// #define INCREMENT 1 //precision, length of 1 side of the square(bin) //The (abs)difference between between two sides is the length of the grid. Length/Increment determines how many bins #define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT) __constant__ int d_coordarray[ARRAY_SIZE];//Place coordinates in constant memory //show grid with votes. 
Becomes unuseful when bins > 20x20 void printVotes(int *h_binarray) { // Number of columns for (int i = 0; i < column; ++i) { for (int j = 0; j < column * column; j += column) std::cout << h_binarray[i + j] << "\t"; std::cout << std::endl; } } // Convert from array index to representative slope float slopeCalculator(int index) { const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2; int displacement = 0, flag = 0; int change = column; //from the center, compare columns incrementing by column length until index is found while (flag == 0) { if (index <= center + change && index >= center - change) { flag++; } else { change += column; displacement++; } } //gives the center horizontal value for the bin passed to this function return (displacement * INCREMENT) + (INCREMENT / 2.0); } // Convert from array index to representative intercept float interceptCalculator(int index) { const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)); const int check = index % col;//represents the displacement shifted into the first column float displacement = 0.0, flag = 0; int center1 = column / 2, center2 = column / 2 - 1; //starting at middle 2 indices move up one and down one until check is found while (flag == 0) { ((check == center1 || check == center2) ? 
flag : displacement)++; center1++; center2--; } //gives the center,vertical value for the bin passed to this function return (float)(displacement * INCREMENT) + (INCREMENT / 2.0); } // Find n highest indexes in the array void highest_index(int *h_binarray) { const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT); const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT)); int *index = new int[size]; for (int i = 0; i < size; ++i) index[i] = i; bool stop = true; int temp, temp2; // Bubble sort for (int i = 1; (i <= size) && stop; ++i) { stop = false; for (int j = 0; j < (size - 1); ++j) { if (h_binarray[j + 1] > h_binarray[j]) { temp = h_binarray[j]; temp2 = index[j]; h_binarray[j] = h_binarray[j + 1]; index[j] = index[j + 1]; h_binarray[j + 1] = temp; index[j + 1] = temp2; stop = true; } } } //use highest values for slope & intercept float totalslope = 0.0, totalintercept = 0.0; for (int i = 0; i < NUM_LINES; ++i) { const float slope = slopeCalculator(index[i]); const float intercept = interceptCalculator(index[i]); std::cout << "[" << i << "]: "; if (index[i] < (size / 2)) { std::cout << "slope= -" << slope << " and " << std::endl; totalslope = totalslope - slope; } else { std::cout << "slope = " << slope << " and " << std::endl; totalslope = totalslope + slope; } if (index[i] % col < (col / 2)) { std::cout << " and intercept = " << intercept << std::endl << "From point: " << index[i] << std::endl; totalintercept = totalintercept + intercept; } else { std::cout << " and intercept = -" << intercept << std::endl << "From point: " << index[i] << std::endl; totalintercept = totalintercept - intercept; } std::cout << "with value = " << h_binarray[i] << std::endl; } std::cout << "=============" << std::endl; std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl; std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl; std::cout << 
std::endl; } //kernel functions __global__ void kernelHough(int size, int* d_binarray) { /* take a piece of the array. discretize into y=mx+b format per point. check all points and increment all bins touched at the end recombine all shared memory to a global bin tally. Take the most significant X numbers as lines. discretized from point(1,1) ==(m,n)==> (-1,1) check each bin for count and sum them to a global array in sync NUM of coordinates will check all bins for their own equation and increment appropriately */ // Number from 0 through arraysize / 2 const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x); // Slope is discretized space = -x const float slope = -1.0 * d_coordarray[thread]; // Intercept in discretized space = y const float intercept = d_coordarray[thread + 1]; int counter = 0;//keeps current array index being checked //loop through entire graph for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) { const float xMin = x; const float xMax = x + INCREMENT; for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) { const float yMin = y - INCREMENT; const float yMax = y; //calculates possible y range associated with the known x range const float lower_range = slope * xMin + intercept; const float upper_range = slope * xMax + intercept; //if the possible y ranges corresponding to the x values exist within the actual y range increment bin if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin)) atomicAdd(&d_binarray[counter], 1);//increment bin, protected from race condition counter++; } } } //prep function void houghTransform(int* h_input_array, int size) { int *d_binarray; int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)]; // Length of the square grid for bins * size of int const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int); const int coordarraysize = size * sizeof(int); // Copy coordinates to Constant 
Memory hipMemcpyToSymbol(HIP_SYMBOL(d_coordarray), h_input_array, coordarraysize); hipMalloc((void**)&d_binarray, binarraysize); // 1-D Block dim3 myBlockDim(1, 1, 1); // ((size / 2), 1, 1); 1d grid dim3 myGridDim((size/2), 1, 1); kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray); hipMemcpy(h_binarray, d_binarray, binarraysize, hipMemcpyDeviceToHost); if(INCREMENT>=0.5)printVotes(h_binarray); highest_index(h_binarray); } int main() { // Seed RNG srand(time(0)); // Test case array int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21}; // Random array initializer int *random = new int[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; ++i) random[i] = (rand() % 10) + 1; // Begin test function houghTransform(test, ARRAY_SIZE); return 0; }
.text .file "kernel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z10printVotesPi # -- Begin function _Z10printVotesPi .p2align 4, 0x90 .type _Z10printVotesPi,@function _Z10printVotesPi: # @_Z10printVotesPi .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx xorl %r15d, %r15d jmp .LBB0_1 .p2align 4, 0x90 .LBB0_6: # in Loop: Header=BB0_1 Depth=1 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r14), %rax movq %r14, %rdi movl $10, %esi callq *48(%rax) .LBB0_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB0_1 Depth=1 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv incq %r15 addq $4, %rbx cmpq $10, %r15 je .LBB0_8 .LBB0_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 movq $-10, %r14 .p2align 4, 0x90 .LBB0_2: # Parent Loop BB0_1 Depth=1 # => This Inner Loop Header: Depth=2 movl 40(%rbx,%r14,4), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movl $.L.str, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l addq $10, %r14 cmpq $90, %r14 jb .LBB0_2 # %bb.3: # in Loop: Header=BB0_1 Depth=1 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB0_9 # %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB0_1 Depth=1 cmpb $0, 56(%r14) je .LBB0_6 # %bb.5: # in Loop: Header=BB0_1 Depth=1 movzbl 67(%r14), %eax jmp .LBB0_7 .LBB0_8: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_9: .cfi_def_cfa_offset 32 callq _ZSt16__throw_bad_castv .Lfunc_end0: .size _Z10printVotesPi, .Lfunc_end0-_Z10printVotesPi .cfi_endproc # -- End function 
.section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z15slopeCalculatori .LCPI1_0: .quad 0x3fe0000000000000 # double 0.5 .text .globl _Z15slopeCalculatori .p2align 4, 0x90 .type _Z15slopeCalculatori,@function _Z15slopeCalculatori: # @_Z15slopeCalculatori .cfi_startproc # %bb.0: xorl %eax, %eax movl $10, %ecx jmp .LBB1_1 .p2align 4, 0x90 .LBB1_3: # in Loop: Header=BB1_1 Depth=1 movl %edx, %ecx je .LBB1_4 .LBB1_1: # =>This Inner Loop Header: Depth=1 leal 50(%rcx), %edx cmpl %edi, %edx setl %dl movl $50, %esi subl %ecx, %esi cmpl %edi, %esi setg %sil orb %dl, %sil leal 10(%rcx), %edx movzbl %sil, %esi addl %esi, %eax testb %sil, %sil jne .LBB1_3 # %bb.2: # in Loop: Header=BB1_1 Depth=1 movl %ecx, %edx jmp .LBB1_3 .LBB1_4: cvtsi2sd %eax, %xmm0 addsd .LCPI1_0(%rip), %xmm0 cvtsd2ss %xmm0, %xmm0 retq .Lfunc_end1: .size _Z15slopeCalculatori, .Lfunc_end1-_Z15slopeCalculatori .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z19interceptCalculatori .LCPI2_0: .long 0x3f800000 # float 1 .LCPI2_1: .long 0x3f000000 # float 0.5 .text .globl _Z19interceptCalculatori .p2align 4, 0x90 .type _Z19interceptCalculatori,@function _Z19interceptCalculatori: # @_Z19interceptCalculatori .cfi_startproc # %bb.0: movslq %edi, %rax imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx negl %ecx addl %ecx, %eax addl $-5, %eax xorps %xmm1, %xmm1 xorl %ecx, %ecx movss .LCPI2_0(%rip), %xmm2 # xmm2 = mem[0],zero,zero,zero xorps %xmm3, %xmm3 xorps %xmm0, %xmm0 jmp .LBB2_1 .p2align 4, 0x90 .LBB2_5: # %.cont # in Loop: Header=BB2_1 Depth=1 movaps %xmm4, %xmm0 .LBB2_6: # %.cont # in Loop: Header=BB2_1 Depth=1 incl %ecx ucomiss %xmm1, %xmm3 jne .LBB2_7 jp .LBB2_7 .LBB2_1: # %.cont # =>This Inner Loop Header: Depth=1 leal (%rax,%rcx), %edx cmpl %ecx, %eax sete %sil cmpl $-1, %edx sete %dl orb %sil, %dl movaps %xmm3, %xmm4 testb 
%dl, %dl jne .LBB2_3 # %bb.2: # %.cont # in Loop: Header=BB2_1 Depth=1 movaps %xmm0, %xmm4 .LBB2_3: # %.cont # in Loop: Header=BB2_1 Depth=1 addss %xmm2, %xmm4 je .LBB2_5 # %bb.4: # in Loop: Header=BB2_1 Depth=1 movaps %xmm4, %xmm3 jmp .LBB2_6 .LBB2_7: addss .LCPI2_1(%rip), %xmm0 retq .Lfunc_end2: .size _Z19interceptCalculatori, .Lfunc_end2-_Z19interceptCalculatori .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z13highest_indexPi .LCPI3_0: .quad 0x3fe0000000000000 # double 0.5 .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 .LCPI3_1: .long 0x3f800000 # float 1 .LCPI3_2: .long 0x3f000000 # float 0.5 .LCPI3_3: .long 0x3e800000 # float 0.25 .text .globl _Z13highest_indexPi .p2align 4, 0x90 .type _Z13highest_indexPi,@function _Z13highest_indexPi: # @_Z13highest_indexPi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdi, %rbx movl $400, %edi # imm = 0x190 callq _Znam movq %rax, %r14 xorl %eax, %eax .p2align 4, 0x90 .LBB3_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%r14,%rax,4) incq %rax cmpq $100, %rax jne .LBB3_1 # %bb.2: # %.preheader123.preheader movl $1, %eax .p2align 4, 0x90 .LBB3_3: # %.preheader123 # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 movl $1, %edx xorl %ecx, %ecx jmp .LBB3_4 .p2align 4, 0x90 .LBB3_6: # in Loop: Header=BB3_4 Depth=2 incq %rdx cmpq $100, %rdx je .LBB3_7 .LBB3_4: # Parent Loop BB3_3 Depth=1 # => This Inner Loop Header: Depth=2 movl -4(%rbx,%rdx,4), %esi movl (%rbx,%rdx,4), %edi cmpl %esi, %edi jle .LBB3_6 # %bb.5: # in Loop: Header=BB3_4 Depth=2 movl -4(%r14,%rdx,4), %ecx movl %edi, -4(%rbx,%rdx,4) movl 
(%r14,%rdx,4), %edi movl %edi, -4(%r14,%rdx,4) movl %esi, (%rbx,%rdx,4) movl %ecx, (%r14,%rdx,4) movb $1, %cl jmp .LBB3_6 .p2align 4, 0x90 .LBB3_7: # in Loop: Header=BB3_3 Depth=1 cmpl $99, %eax ja .LBB3_9 # %bb.8: # in Loop: Header=BB3_3 Depth=1 incl %eax andb $1, %cl jne .LBB3_3 .LBB3_9: # %.preheader.preheader xorps %xmm3, %xmm3 xorl %r13d, %r13d movss .LCPI3_1(%rip), %xmm4 # xmm4 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 movss %xmm0, 4(%rsp) # 4-byte Spill movss %xmm0, 8(%rsp) # 4-byte Spill jmp .LBB3_10 .p2align 4, 0x90 .LBB3_71: # in Loop: Header=BB3_10 Depth=1 movq %r12, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB3_72: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit122 # in Loop: Header=BB3_10 Depth=1 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv incq %r13 cmpq $4, %r13 xorps %xmm3, %xmm3 movss .LCPI3_1(%rip), %xmm4 # xmm4 = mem[0],zero,zero,zero je .LBB3_19 .LBB3_10: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_11 Depth 2 # Child Loop BB3_15 Depth 2 movl (%r14,%r13,4), %ebp movl $10, %ecx xorl %eax, %eax jmp .LBB3_11 .p2align 4, 0x90 .LBB3_13: # in Loop: Header=BB3_11 Depth=2 movl %edx, %ecx je .LBB3_14 .LBB3_11: # Parent Loop BB3_10 Depth=1 # => This Inner Loop Header: Depth=2 leal 50(%rcx), %edx cmpl %ebp, %edx setl %dl movl $50, %esi subl %ecx, %esi cmpl %ebp, %esi setg %sil orb %dl, %sil leal 10(%rcx), %edx movzbl %sil, %esi addl %esi, %eax testb %sil, %sil jne .LBB3_13 # %bb.12: # in Loop: Header=BB3_11 Depth=2 movl %ecx, %edx jmp .LBB3_13 .p2align 4, 0x90 .LBB3_14: # %_Z15slopeCalculatori.exit # in Loop: Header=BB3_10 Depth=1 xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 addsd .LCPI3_0(%rip), %xmm0 movslq %ebp, %rax imulq $1717986919, %rax, %rax # imm = 0x66666667 movq %rax, %rcx shrq $63, %rcx sarq $34, %rax addl %ecx, %eax addl %eax, %eax leal (%rax,%rax,4), %eax 
movl %ebp, %r15d subl %eax, %r15d xorps %xmm1, %xmm1 movl $4, %eax xorps %xmm5, %xmm5 jmp .LBB3_15 .p2align 4, 0x90 .LBB3_36: # %.cont.i # in Loop: Header=BB3_15 Depth=2 movaps %xmm2, %xmm5 .LBB3_37: # %.cont.i # in Loop: Header=BB3_15 Depth=2 decl %eax ucomiss %xmm3, %xmm1 jne .LBB3_38 jp .LBB3_38 .LBB3_15: # %.cont.i # Parent Loop BB3_10 Depth=1 # => This Inner Loop Header: Depth=2 leal (%r15,%rax), %ecx cmpl $9, %ecx sete %cl cmpl %eax, %r15d sete %dl orb %cl, %dl movaps %xmm1, %xmm2 testb %dl, %dl jne .LBB3_17 # %bb.16: # %.cont.i # in Loop: Header=BB3_15 Depth=2 movaps %xmm5, %xmm2 .LBB3_17: # %.cont.i # in Loop: Header=BB3_15 Depth=2 addss %xmm4, %xmm2 je .LBB3_36 # %bb.18: # in Loop: Header=BB3_15 Depth=2 movaps %xmm2, %xmm1 jmp .LBB3_37 .p2align 4, 0x90 .LBB3_38: # %_Z19interceptCalculatori.exit # in Loop: Header=BB3_10 Depth=1 cvtsd2ss %xmm0, %xmm0 movss %xmm0, 16(%rsp) # 4-byte Spill addss .LCPI3_2(%rip), %xmm5 movss %xmm5, 12(%rsp) # 4-byte Spill movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $1, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movq %r13, 24(%rsp) # 8-byte Spill movl %r13d, %esi callq _ZNSolsEi movl $.L.str.2, %esi movl $3, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movss 16(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movsd %xmm0, 32(%rsp) # 8-byte Spill movl $_ZSt4cout, %edi cmpl $49, %ebp jg .LBB3_44 # %bb.39: # in Loop: Header=BB3_10 Depth=1 movl $.L.str.3, %esi movl $8, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 32(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r12 movl $.L.str.4, %esi movl $5, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r12), %rax movq -24(%rax), %rax movq 240(%r12,%rax), 
%r13 testq %r13, %r13 je .LBB3_73 # %bb.40: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i89 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r13) je .LBB3_42 # %bb.41: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r13), %eax jmp .LBB3_43 .p2align 4, 0x90 .LBB3_44: # in Loop: Header=BB3_10 Depth=1 movl $.L.str.5, %esi movl $8, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 32(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq %rax, %r12 movl $.L.str.4, %esi movl $5, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq (%r12), %rax movq -24(%rax), %rax movq 240(%r12,%rax), %r13 testq %r13, %r13 je .LBB3_73 # %bb.45: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i94 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r13) je .LBB3_47 # %bb.46: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r13), %eax jmp .LBB3_48 .LBB3_42: # in Loop: Header=BB3_10 Depth=1 movq %r13, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r13), %rax movq %r13, %rdi movl $10, %esi callq *48(%rax) .LBB3_43: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit92 # in Loop: Header=BB3_10 Depth=1 movsbl %al, %esi movq %r12, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movss 8(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero subss 16(%rsp), %xmm0 # 4-byte Folded Reload jmp .LBB3_49 .LBB3_47: # in Loop: Header=BB3_10 Depth=1 movq %r13, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r13), %rax movq %r13, %rdi movl $10, %esi callq *48(%rax) .LBB3_48: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit97 # in Loop: Header=BB3_10 Depth=1 movsbl %al, %esi movq %r12, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movss 8(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero addss 16(%rsp), %xmm0 # 4-byte Folded Reload .LBB3_49: # in Loop: Header=BB3_10 Depth=1 movss 
%xmm0, 8(%rsp) # 4-byte Spill movq 24(%rsp), %r13 # 8-byte Reload movss 12(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movsd %xmm0, 16(%rsp) # 8-byte Spill movl $_ZSt4cout, %edi cmpl $4, %r15d jg .LBB3_59 # %bb.50: # in Loop: Header=BB3_10 Depth=1 movl $.L.str.6, %esi movl $17, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB3_73 # %bb.51: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i99 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r12) je .LBB3_53 # %bb.52: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r12), %ecx jmp .LBB3_54 .p2align 4, 0x90 .LBB3_59: # in Loop: Header=BB3_10 Depth=1 movl $.L.str.8, %esi movl $18, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movsd 16(%rsp), %xmm0 # 8-byte Reload # xmm0 = mem[0],zero callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB3_73 # %bb.60: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i109 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r12) je .LBB3_62 # %bb.61: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r12), %ecx jmp .LBB3_63 .LBB3_53: # in Loop: Header=BB3_10 Depth=1 movq %r12, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB3_54: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit102 # in Loop: Header=BB3_10 Depth=1 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r12 movl $.L.str.7, %esi movl $12, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r12, %rdi movl 
%ebp, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB3_73 # %bb.55: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i104 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r12) je .LBB3_57 # %bb.56: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r12), %ecx jmp .LBB3_58 .LBB3_62: # in Loop: Header=BB3_10 Depth=1 movq %r12, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB3_63: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit112 # in Loop: Header=BB3_10 Depth=1 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq %rax, %r12 movl $.L.str.7, %esi movl $12, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r12, %rdi movl %ebp, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB3_73 # %bb.64: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i114 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r12) je .LBB3_66 # %bb.65: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r12), %ecx jmp .LBB3_67 .LBB3_57: # in Loop: Header=BB3_10 Depth=1 movq %r12, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB3_58: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit107 # in Loop: Header=BB3_10 Depth=1 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero addss 12(%rsp), %xmm0 # 4-byte Folded Reload jmp .LBB3_68 .LBB3_66: # in Loop: Header=BB3_10 Depth=1 movq %r12, %rdi movq %rax, %r15 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r15, %rax .LBB3_67: # 
%_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit117 # in Loop: Header=BB3_10 Depth=1 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero subss 12(%rsp), %xmm0 # 4-byte Folded Reload .LBB3_68: # in Loop: Header=BB3_10 Depth=1 movss %xmm0, 4(%rsp) # 4-byte Spill movl $_ZSt4cout, %edi movl $.L.str.9, %esi movl $13, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl (%rbx,%r13,4), %esi movl $_ZSt4cout, %edi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB3_73 # %bb.69: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i119 # in Loop: Header=BB3_10 Depth=1 cmpb $0, 56(%r12) je .LBB3_71 # %bb.70: # in Loop: Header=BB3_10 Depth=1 movzbl 67(%r12), %ecx jmp .LBB3_72 .LBB3_19: movl $_ZSt4cout, %edi movl $.L.str.10, %esi movl $13, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB3_73 # %bb.20: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB3_22 # %bb.21: movzbl 67(%rbx), %eax jmp .LBB3_23 .LBB3_22: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB3_23: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.11, %esi movl $32, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movss 8(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero mulss .LCPI3_3(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB3_73 # 
%bb.24: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i74 cmpb $0, 56(%rbx) je .LBB3_26 # %bb.25: movzbl 67(%rbx), %ecx jmp .LBB3_27 .LBB3_26: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB3_27: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit77 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movl $_ZSt4cout, %edi movl $.L.str.12, %esi movl $35, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero mulss .LCPI3_3(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %rbx testq %rbx, %rbx je .LBB3_73 # %bb.28: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i79 cmpb $0, 56(%rbx) je .LBB3_30 # %bb.29: movzbl 67(%rbx), %ecx jmp .LBB3_31 .LBB3_30: movq %rbx, %rdi movq %rax, %r14 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) movl %eax, %ecx movq %r14, %rax .LBB3_31: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit82 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB3_73 # %bb.32: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i84 cmpb $0, 56(%rbx) je .LBB3_34 # %bb.33: movzbl 67(%rbx), %eax jmp .LBB3_35 .LBB3_34: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) .LBB3_35: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit87 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 
.cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp _ZNSo5flushEv # TAILCALL .LBB3_73: .cfi_def_cfa_offset 96 callq _ZSt16__throw_bad_castv .Lfunc_end3: .size _Z13highest_indexPi, .Lfunc_end3-_Z13highest_indexPi .cfi_endproc # -- End function .globl _Z26__device_stub__kernelHoughiPi # -- Begin function _Z26__device_stub__kernelHoughiPi .p2align 4, 0x90 .type _Z26__device_stub__kernelHoughiPi,@function _Z26__device_stub__kernelHoughiPi: # @_Z26__device_stub__kernelHoughiPi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movl %edi, 4(%rsp) movq %rsi, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 64(%rsp) leaq 56(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z11kernelHoughiPi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end4: .size _Z26__device_stub__kernelHoughiPi, .Lfunc_end4-_Z26__device_stub__kernelHoughiPi .cfi_endproc # -- End function .globl _Z14houghTransformPii # -- Begin function _Z14houghTransformPii .p2align 4, 0x90 .type _Z14houghTransformPii,@function _Z14houghTransformPii: # @_Z14houghTransformPii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %r14d movq %rdi, %r15 movl $400, %edi # imm = 0x190 callq _Znam movq %rax, %rbx leal (,%r14,4), %eax movslq %eax, %rdx movl $d_coordarray, %edi movq %r15, %rsi xorl %ecx, %ecx movl $1, %r8d callq hipMemcpyToSymbol leaq 16(%rsp), %rdi movl $400, %esi # imm = 0x190 callq hipMalloc movl %r14d, %edi 
shrl $31, %edi addl %r14d, %edi sarl %edi movabsq $4294967296, %rdx # imm = 0x100000000 orq %rdx, %rdi orq $1, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movq 16(%rsp), %rax movl %r14d, 12(%rsp) movq %rax, 72(%rsp) leaq 12(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z11kernelHoughiPi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_2: movq 16(%rsp), %rsi movl $400, %edx # imm = 0x190 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq %rbx, %rdi callq _Z10printVotesPi movq %rbx, %rdi callq _Z13highest_indexPi addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z14houghTransformPii, .Lfunc_end5-_Z14houghTransformPii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $80, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -16 xorl %edi, %edi callq time movl %eax, %edi callq srand movaps .L__const.main.test+64(%rip), %xmm0 movaps %xmm0, 64(%rsp) movaps .L__const.main.test+48(%rip), %xmm0 movaps %xmm0, 48(%rsp) movaps .L__const.main.test+32(%rip), %xmm0 movaps %xmm0, 32(%rsp) movaps .L__const.main.test+16(%rip), %xmm0 movaps %xmm0, 16(%rsp) movaps .L__const.main.test(%rip), %xmm0 movaps %xmm0, (%rsp) movl $20, %ebx .p2align 4, 0x90 .LBB6_1: # =>This Inner Loop Header: Depth=1 callq rand decl %ebx jne .LBB6_1 # %bb.2: movq %rsp, %rdi movl $20, %esi callq _Z14houghTransformPii xorl %eax, %eax addq $80, 
%rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size main, .Lfunc_end6-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB7_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB7_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11kernelHoughiPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $1, (%rsp) movl $d_coordarray, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $80, %r9d movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end7: .size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB8_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB8_2: retq .Lfunc_end8: .size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor .cfi_endproc # -- End function .type d_coordarray,@object # @d_coordarray .local d_coordarray .comm d_coordarray,80,16 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\t" .size .L.str, 2 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "[" .size .L.str.1, 2 .type .L.str.2,@object # @.str.2 .L.str.2: 
.asciz "]: " .size .L.str.2, 4 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "slope= -" .size .L.str.3, 9 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " and " .size .L.str.4, 6 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "slope = " .size .L.str.5, 9 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz " and intercept = " .size .L.str.6, 18 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "From point: " .size .L.str.7, 13 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz " and intercept = -" .size .L.str.8, 19 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "with value = " .size .L.str.9, 14 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "=============" .size .L.str.10, 14 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "The average of these slopes is: " .size .L.str.11, 33 .type .L.str.12,@object # @.str.12 .L.str.12: .asciz "The average of these intercept is: " .size .L.str.12, 36 .type _Z11kernelHoughiPi,@object # @_Z11kernelHoughiPi .section .rodata,"a",@progbits .globl _Z11kernelHoughiPi .p2align 3, 0x0 _Z11kernelHoughiPi: .quad _Z26__device_stub__kernelHoughiPi .size _Z11kernelHoughiPi, 8 .type .L__const.main.test,@object # @__const.main.test .p2align 4, 0x0 .L__const.main.test: .long 1 # 0x1 .long 3 # 0x3 .long 2 # 0x2 .long 5 # 0x5 .long 3 # 0x3 .long 7 # 0x7 .long 4 # 0x4 .long 9 # 0x9 .long 5 # 0x5 .long 11 # 0xb .long 6 # 0x6 .long 13 # 0xd .long 7 # 0x7 .long 15 # 0xf .long 8 # 0x8 .long 17 # 0x11 .long 9 # 0x9 .long 19 # 0x13 .long 10 # 0xa .long 21 # 0x15 .size .L__const.main.test, 80 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11kernelHoughiPi" .size .L__unnamed_1, 19 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "d_coordarray" .size .L__unnamed_2, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size 
__hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__kernelHoughiPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym d_coordarray .addrsig_sym _ZSt4cout .addrsig_sym _Z11kernelHoughiPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z11kernelHoughiPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R8, RZ, RZ, -0x3f600000 ; /* 0xc0a00000ff087424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */ /* 0x000fc800078e00ff */ /*0070*/ IMAD.SHL.U32 R4, R0, 0x4, RZ ; /* 0x0000000400047824 */ /* 0x000fc800078e00ff */ /*0080*/ LDC.64 R6, c[0x3][R4] ; /* 0x00c0000004067b82 */ /* 0x000e240000000a00 */ /*0090*/ I2F.F64 R2, R6 ; /* 0x0000000600027312 */ /* 0x001e300000201c00 */ /*00a0*/ I2F R0, R7 ; /* 0x0000000700007306 */ /* 0x0002b00000201400 */ /*00b0*/ F2F.F32.F64 R5, R2 ; /* 0x0000000200057310 */ /* 0x0010e40000301000 */ /*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */ /* 0x001fc400078e00ff */ /*00d0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */ /* 0x006fc800078e00ff */ /*00e0*/ FADD R4, R8, 1 ; /* 0x3f80000008047421 */ /* 0x000fe20000000000 */ /*00f0*/ BSSY B0, 0x250 ; /* 0x0000015000007945 */ /* 0x000fe20003800000 */ /*0100*/ FFMA R9, -R5.reuse, R8, R0.reuse ; /* 0x0000000805097223 */ /* 0x148fe40000000100 */ /*0110*/ FFMA R10, -R5, R4, R0 ; /* 0x00000004050a7223 */ /* 0x000fc60000000100 */ /*0120*/ FSETP.GE.AND P0, PT, R9.reuse, 4, PT ; /* 0x408000000900780b */ /* 0x040fe40003f06000 */ /*0130*/ FSETP.GE.AND P4, PT, R10.reuse, 4, PT ; /* 0x408000000a00780b */ /* 0x040fe40003f86000 */ /*0140*/ FSETP.LE.AND P0, PT, R9.reuse, 5, P0 ; /* 0x40a000000900780b */ /* 0x040fe40000703000 
*/ /*0150*/ FSETP.GTU.OR P4, PT, R10, 5, !P4 ; /* 0x40a000000a00780b */ /* 0x000fe4000678c400 */ /*0160*/ FSETP.GE.AND P2, PT, R9.reuse, -1, PT ; /* 0xbf8000000900780b */ /* 0x040fe40003f46000 */ /*0170*/ FSETP.GE.AND P6, PT, R9, 3, PT ; /* 0x404000000900780b */ /* 0x000fc40003fc6000 */ /*0180*/ P2R R6, PR, RZ, 0x4 ; /* 0x00000004ff067803 */ /* 0x000fe40000000000 */ /*0190*/ FSETP.GE.AND P1, PT, R9.reuse, 2, PT ; /* 0x400000000900780b */ /* 0x040fe40003f26000 */ /*01a0*/ FSETP.GE.AND P3, PT, R9.reuse, 1, PT ; /* 0x3f8000000900780b */ /* 0x040fe40003f66000 */ /*01b0*/ FSETP.GE.AND P5, PT, R9, RZ, PT ; /* 0x000000ff0900720b */ /* 0x000fe40003fa6000 */ /*01c0*/ FSETP.GE.AND P2, PT, R10, 3, PT ; /* 0x404000000a00780b */ /* 0x000fe20003f46000 */ /*01d0*/ @P4 BRA !P0, 0x240 ; /* 0x0000006000004947 */ /* 0x000fee0004000000 */ /*01e0*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e220000000000 */ /*01f0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fc400038e0100 */ /*0200*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0210*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x000e6a0008000000 */ /*0220*/ ISETP.EQ.U32.AND P0, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x001fda000bf02070 */ /*0230*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R7 ; /* 0x000000070200098e */ /* 0x0021e4000c10e184 */ /*0240*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0250*/ FSETP.LE.AND P6, PT, R9.reuse, 4, P6 ; /* 0x408000000900780b */ /* 0x040fe200037c3000 */ /*0260*/ BSSY B0, 0x320 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0270*/ FSETP.GTU.OR P2, PT, R10.reuse, 4, !P2 ; /* 0x408000000a00780b */ /* 0x040fe4000574c400 */ /*0280*/ FSETP.GE.AND P4, PT, R9, -2, PT ; /* 0xc00000000900780b */ /* 0x000fe40003f86000 */ /*0290*/ FSETP.GE.AND P0, PT, R10, 2, PT ; /* 0x400000000a00780b */ /* 0x000fd20003f06000 */ /*02a0*/ @P2 BRA !P6, 0x310 ; /* 0x0000006000002947 */ /* 0x000fea0007000000 */ /*02b0*/ S2R R8, SR_LANEID ; /* 
0x0000000000087919 */ /* 0x000e620000000000 */ /*02c0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*02d0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*02e0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*02f0*/ ISETP.EQ.U32.AND P2, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf42070 */ /*0300*/ @P2 RED.E.ADD.STRONG.GPU [R2.64+0x4], R7 ; /* 0x000004070200298e */ /* 0x0011e4000c10e184 */ /*0310*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0320*/ FSETP.LE.AND P1, PT, R9.reuse, 3, P1 ; /* 0x404000000900780b */ /* 0x040fe20000f23000 */ /*0330*/ BSSY B0, 0x3f0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0340*/ FSETP.GTU.OR P6, PT, R10.reuse, 3, !P0 ; /* 0x404000000a00780b */ /* 0x040fe400047cc400 */ /*0350*/ FSETP.GE.AND P2, PT, R9, -3, PT ; /* 0xc04000000900780b */ /* 0x000fe40003f46000 */ /*0360*/ FSETP.GE.AND P0, PT, R10, 1, PT ; /* 0x3f8000000a00780b */ /* 0x000fd20003f06000 */ /*0370*/ @P6 BRA !P1, 0x3e0 ; /* 0x0000006000006947 */ /* 0x000fea0004800000 */ /*0380*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0390*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*03a0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*03b0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*03c0*/ ISETP.EQ.U32.AND P1, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf22070 */ /*03d0*/ @P1 RED.E.ADD.STRONG.GPU [R2.64+0x8], R7 ; /* 0x000008070200198e */ /* 0x0011e4000c10e184 */ /*03e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*03f0*/ FSETP.LE.AND P3, PT, R9.reuse, 2, P3 ; /* 0x400000000900780b */ /* 0x040fe20001f63000 */ /*0400*/ BSSY B0, 0x4c0 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0410*/ FSETP.GTU.OR P6, PT, R10, 2, !P0 ; /* 0x400000000a00780b */ /* 0x000fe400047cc400 */ /*0420*/ FSETP.GE.AND P1, PT, R9.reuse, -4, PT ; /* 
0xc08000000900780b */ /* 0x040fe40003f26000 */ /*0430*/ FSETP.GE.AND P0, PT, R9, -5, PT ; /* 0xc0a000000900780b */ /* 0x000fd20003f06000 */ /*0440*/ @P6 BRA !P3, 0x4b0 ; /* 0x0000006000006947 */ /* 0x000fea0005800000 */ /*0450*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0460*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0470*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0480*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0490*/ ISETP.EQ.U32.AND P3, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf62070 */ /*04a0*/ @P3 RED.E.ADD.STRONG.GPU [R2.64+0xc], R7 ; /* 0x00000c070200398e */ /* 0x0011e4000c10e184 */ /*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*04c0*/ FSETP.GE.AND P6, PT, R10.reuse, RZ, PT ; /* 0x000000ff0a00720b */ /* 0x040fe20003fc6000 */ /*04d0*/ BSSY B0, 0x5d0 ; /* 0x000000f000007945 */ /* 0x000fe20003800000 */ /*04e0*/ FSETP.LE.AND P5, PT, R9.reuse, 1, P5 ; /* 0x3f8000000900780b */ /* 0x040fe40002fa3000 */ /*04f0*/ FSETP.GTU.OR P6, PT, R10, 1, !P6 ; /* 0x3f8000000a00780b */ /* 0x000fe400077cc400 */ /*0500*/ FSETP.LE.AND P4, PT, R9, -1, P4 ; /* 0xbf8000000900780b */ /* 0x000fe40002783000 */ /*0510*/ ISETP.NE.AND P3, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f65270 */ /*0520*/ P2R R6, PR, RZ, 0x10 ; /* 0x00000010ff067803 */ /* 0x000fc40000000000 */ /*0530*/ FSETP.LE.AND P3, PT, R9, RZ, P3 ; /* 0x000000ff0900720b */ /* 0x000fe40001f63000 */ /*0540*/ FSETP.GE.AND P4, PT, R10, -1, PT ; /* 0xbf8000000a00780b */ /* 0x000fc60003f86000 */ /*0550*/ @P6 BRA !P5, 0x5c0 ; /* 0x0000006000006947 */ /* 0x000fea0006800000 */ /*0560*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0570*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0580*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0590*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ 
/* 0x001e2a0008000000 */ /*05a0*/ ISETP.EQ.U32.AND P5, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bfa2070 */ /*05b0*/ @P5 RED.E.ADD.STRONG.GPU [R2.64+0x10], R7 ; /* 0x000010070200598e */ /* 0x0011e4000c10e184 */ /*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*05d0*/ FSETP.GTU.OR P6, PT, R10.reuse, RZ, !P4 ; /* 0x000000ff0a00720b */ /* 0x040fe200067cc400 */ /*05e0*/ BSSY B0, 0x6b0 ; /* 0x000000c000007945 */ /* 0x000fe20003800000 */ /*05f0*/ FSETP.GE.AND P5, PT, R10.reuse, -2, PT ; /* 0xc00000000a00780b */ /* 0x040fe40003fa6000 */ /*0600*/ FSETP.LE.AND P2, PT, R9.reuse, -2, P2 ; /* 0xc00000000900780b */ /* 0x040fe40001743000 */ /*0610*/ FSETP.LE.AND P1, PT, R9, -3, P1 ; /* 0xc04000000900780b */ /* 0x000fe40000f23000 */ /*0620*/ FSETP.GE.AND P4, PT, R10, -3, PT ; /* 0xc04000000a00780b */ /* 0x000fca0003f86000 */ /*0630*/ @P6 BRA !P3, 0x6a0 ; /* 0x0000006000006947 */ /* 0x000fea0005800000 */ /*0640*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0650*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0660*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0670*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0680*/ ISETP.EQ.U32.AND P3, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bf62070 */ /*0690*/ @P3 RED.E.ADD.STRONG.GPU [R2.64+0x14], R7 ; /* 0x000014070200398e */ /* 0x0011e4000c10e184 */ /*06a0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*06b0*/ ISETP.NE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003fc5270 */ /*06c0*/ BSSY B0, 0x7d0 ; /* 0x0000010000007945 */ /* 0x000fe20003800000 */ /*06d0*/ FSETP.GTU.OR P5, PT, R10.reuse, -1, !P5 ; /* 0xbf8000000a00780b */ /* 0x040fe40006fac400 */ /*06e0*/ FSETP.GTU.OR P4, PT, R10.reuse, -2, !P4 ; /* 0xc00000000a00780b */ /* 0x040fe4000678c400 */ /*06f0*/ FSETP.GE.AND P3, PT, R10.reuse, -4, PT ; /* 0xc08000000a00780b */ /* 0x040fe40003f66000 */ 
/*0700*/ P2R R6, PR, RZ, 0x10 ; /* 0x00000010ff067803 */ /* 0x000fe40000000000 */ /*0710*/ FSETP.GE.AND P4, PT, R10, -5, PT ; /* 0xc0a000000a00780b */ /* 0x000fc40003f86000 */ /*0720*/ FSETP.LE.AND P0, PT, R9, -4, P0 ; /* 0xc08000000900780b */ /* 0x000fe40000703000 */ /*0730*/ FSETP.GTU.OR P3, PT, R10.reuse, -3, !P3 ; /* 0xc04000000a00780b */ /* 0x040fe40005f6c400 */ /*0740*/ FSETP.GTU.OR P4, PT, R10, -4, !P4 ; /* 0xc08000000a00780b */ /* 0x000fe2000678c400 */ /*0750*/ @P5 BRA !P6, 0x7c0 ; /* 0x0000006000005947 */ /* 0x000fee0007000000 */ /*0760*/ S2R R8, SR_LANEID ; /* 0x0000000000087919 */ /* 0x000e620000000000 */ /*0770*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0780*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0790*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*07a0*/ ISETP.EQ.U32.AND P5, PT, R8, UR7, PT ; /* 0x0000000708007c0c */ /* 0x002fda000bfa2070 */ /*07b0*/ @P5 RED.E.ADD.STRONG.GPU [R2.64+0x18], R7 ; /* 0x000018070200598e */ /* 0x0011e4000c10e184 */ /*07c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*07d0*/ ISETP.NE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe20003fc5270 */ /*07e0*/ BSSY B0, 0x880 ; /* 0x0000009000007945 */ /* 0x000fe20003800000 */ /*07f0*/ FSETP.GEU.AND P5, PT, R4, 5, PT ; /* 0x40a000000400780b */ /* 0x000fd60003fae000 */ /*0800*/ @P6 BRA !P2, 0x870 ; /* 0x0000006000006947 */ /* 0x000fea0005000000 */ /*0810*/ S2R R6, SR_LANEID ; /* 0x0000000000067919 */ /* 0x000e620000000000 */ /*0820*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0830*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0840*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0850*/ ISETP.EQ.U32.AND P2, PT, R6, UR7, PT ; /* 0x0000000706007c0c */ /* 0x002fda000bf42070 */ /*0860*/ @P2 RED.E.ADD.STRONG.GPU [R2.64+0x1c], R7 ; /* 0x00001c070200298e */ /* 0x0011e4000c10e184 */ /*0870*/ 
BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0880*/ BSSY B0, 0x910 ; /* 0x0000008000007945 */ /* 0x000fe20003800000 */ /*0890*/ @P3 BRA !P1, 0x900 ; /* 0x0000006000003947 */ /* 0x000fea0004800000 */ /*08a0*/ S2R R6, SR_LANEID ; /* 0x0000000000067919 */ /* 0x000e620000000000 */ /*08b0*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*08c0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*08d0*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*08e0*/ ISETP.EQ.U32.AND P1, PT, R6, UR7, PT ; /* 0x0000000706007c0c */ /* 0x002fda000bf22070 */ /*08f0*/ @P1 RED.E.ADD.STRONG.GPU [R2.64+0x20], R7 ; /* 0x000020070200198e */ /* 0x0011e4000c10e184 */ /*0900*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0910*/ BSSY B0, 0x9b0 ; /* 0x0000009000007945 */ /* 0x000fe20003800000 */ /*0920*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0004 */ /*0930*/ @P4 BRA !P0, 0x9a0 ; /* 0x0000006000004947 */ /* 0x000fea0004000000 */ /*0940*/ S2R R4, SR_LANEID ; /* 0x0000000000047919 */ /* 0x000e620000000000 */ /*0950*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */ /* 0x000fe400038e0100 */ /*0960*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */ /* 0x000fe200080e0000 */ /*0970*/ POPC R7, UR6 ; /* 0x0000000600077d09 */ /* 0x001e2a0008000000 */ /*0980*/ ISETP.EQ.U32.AND P0, PT, R4, UR7, PT ; /* 0x0000000704007c0c */ /* 0x002fda000bf02070 */ /*0990*/ @P0 RED.E.ADD.STRONG.GPU [R2.64+0x24], R7 ; /* 0x000024070200098e */ /* 0x0011e4000c10e184 */ /*09a0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*09b0*/ IADD3 R2, P0, R2, 0x28, RZ ; /* 0x0000002802027810 */ /* 0x001fca0007f1e0ff */ /*09c0*/ IMAD.X R3, RZ, RZ, R3, P0 ; /* 0x000000ffff037224 */ /* 0x000fe200000e0603 */ /*09d0*/ @!P5 BRA 0xe0 ; /* 0xfffff7000000d947 */ /* 0x000fea000383ffff */ /*09e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*09f0*/ BRA 0x9f0; /* 0xfffffff000007947 */ /* 
0x000fc0000383ffff */ /*0a00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11kernelHoughiPi .globl _Z11kernelHoughiPi .p2align 8 .type _Z11kernelHoughiPi,@function _Z11kernelHoughiPi: s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1) s_mul_i32 s15, s15, s2 s_getpc_b64 s[2:3] s_add_u32 s2, s2, d_coordarray@rel32@lo+4 s_addc_u32 s3, s3, d_coordarray@rel32@hi+12 v_add_lshl_u32 v0, s15, v0, 1 v_or_b32_e32 v2, 1, v0 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[2:3] v_add_co_u32 v0, vcc_lo, v0, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v2, vcc_lo, v2, s2 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo s_load_b64 s[2:3], s[0:1], 0x8 s_mov_b32 s1, 0 s_clause 0x1 global_load_b32 v0, v[0:1], off global_load_b32 v2, v[2:3], off v_mov_b32_e32 v1, 0xc0a00000 v_mov_b32_e32 v3, 0 s_waitcnt vmcnt(1) v_cvt_f32_i32_e32 v0, v0 s_waitcnt vmcnt(0) v_cvt_f32_i32_e32 v2, v2 s_branch .LBB0_2 .LBB0_1: s_set_inst_prefetch_distance 0x2 v_cmp_gt_f32_e32 vcc_lo, 0x40a00000, v1 s_add_i32 s1, s1, s4 s_cbranch_vccz .LBB0_10 .LBB0_2: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v7, 0x40a00000 s_mov_b32 s4, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add_f32_e32 v1, 1.0, v4 v_fma_f32 v4, -v4, v0, v2 v_fma_f32 v5, -v1, v0, v2 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f32_e32 v6, -1.0, v7 v_cmp_nle_f32_e32 vcc_lo, v4, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_nge_f32_e64 s0, v4, v6 s_or_b32 s0, vcc_lo, s0 s_delay_alu 
instid0(SALU_CYCLE_1) s_xor_b32 s5, s0, -1 s_and_saveexec_b32 s6, s0 v_cmp_le_f32_e32 vcc_lo, v5, v7 v_cmp_ge_f32_e64 s0, v5, v6 s_and_not1_b32 s5, s5, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, vcc_lo, s0 s_and_b32 s0, s0, exec_lo s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 s5, s5, s0 s_or_b32 exec_lo, exec_lo, s6 s_and_saveexec_b32 s0, s5 s_cbranch_execz .LBB0_8 s_mov_b32 s5, exec_lo s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mbcnt_lo_u32_b32 v7, s5, 0 v_cmp_eq_u32_e32 vcc_lo, 0, v7 s_and_b32 s6, exec_lo, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_mov_b32 exec_lo, s6 s_cbranch_execz .LBB0_8 s_add_i32 s6, s1, s4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s7, s6, 31 s_lshl_b64 s[6:7], s[6:7], 2 s_waitcnt lgkmcnt(0) s_add_u32 s6, s2, s6 s_addc_u32 s7, s3, s7 s_bcnt1_i32_b32 s5, s5 s_delay_alu instid0(SALU_CYCLE_1) v_mov_b32_e32 v7, s5 global_atomic_add_u32 v3, v7, s[6:7] .LBB0_8: s_or_b32 exec_lo, exec_lo, s0 s_add_i32 s4, s4, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s4, 10 s_cbranch_scc1 .LBB0_1 v_mov_b32_e32 v7, v6 s_branch .LBB0_3 .LBB0_10: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11kernelHoughiPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 
.amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11kernelHoughiPi, .Lfunc_end0-_Z11kernelHoughiPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected d_coordarray .type d_coordarray,@object .section .bss,"aw",@nobits .globl d_coordarray .p2align 4, 0x0 d_coordarray: .zero 80 .size d_coordarray, 80 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym d_coordarray .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - 
.offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11kernelHoughiPi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11kernelHoughiPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <sys/time.h> #define SIZE 102400 #define MOD 102399 #define STEP 128 /* ARRAY A INITIALIZER */ void init_a(int * a) { int i; for(i=0; i<SIZE; i++) { a[i] = 1; } } /* ARRAY B INITIALIZER */ void init_b(int * b) { int i, j; j=0; for(i=0; i<SIZE-1; i++) { b[j] = i; j = (j+STEP)%MOD; } b[SIZE-1] = SIZE-1; } /* CHECKING A VALUES */ int check_a(int * a) { int i; int correct = 1; for(i=0; i<SIZE; i++) { if(a[i] != (i+1)) { correct = 0; } } return correct; } // First version of the function (the original one). __global__ void mykernel1(int * a, int * b, int N) { for(int i = threadIdx.x; i < N; i+=blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } // Second version of the function (question #3). __global__ void mykernel2(int * a, int * b, int *count, int N) { // blockIdx.x: position of the block within the grid. // blockDim.x: dimension of a block (relatively to the direction "x"). // threadIdx.x: position of the thread relatively to the block. printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x); *count = *count + 1; int index = blockIdx.x * blockDim.x + threadIdx.x; for(int i=index; i<N; i+=gridDim.x * blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } #define Q 2 int main(int argc, char * argv[]) { struct timeval start; struct timeval stop; int * a = (int *)malloc(sizeof(int)*SIZE); int * b = (int *)malloc(sizeof(int)*SIZE); int count = 0; init_a(a); init_b(b); /* INSERT CUDA ALLOCATION AND COPY HERE */ int * d_a, * d_b, *d_count; cudaMalloc(&d_a, sizeof(int)*SIZE); cudaMalloc(&d_b, sizeof(int)*SIZE); cudaMalloc(&d_count, sizeof(int)); cudaMemcpy(d_a, a, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice); dim3 nBlocks; dim3 nThperBlock; // We define 1024 threads in only one block. 
// This is not a good solution. if (1 == Q) { nBlocks.x = 1; // number of block in the grid. nThperBlock.x = 1024; // number of threads per bloc } else { nBlocks.x = 16; // number of blocks in the grid. nThperBlock.x = 1024; // number of threads per bloc } // Execute the "kernel" in the GPU. // nBlocks: number of block in the grid // nThperBlock: number of threads per bloc gettimeofday(&start, nullptr); if (1 == Q) { mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE); } else { printf("nBlocks = %d\n", nBlocks.x); printf("nThperBlock = %d\n", nThperBlock.x); mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE); } // The kernel executes asynchronously relatively to the CPU. // That is: (1) the CPU starts the kernel. // (2) the kernel starts its execution. // (3) but, before the kernel stops, the CPU continues its execution! // Thus, we need to synchronize the CPU and the kernel. // The function "cudaDeviceSynchronize" waits for the kernel to finish. cudaDeviceSynchronize(); gettimeofday(&stop, nullptr); printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec); // Copy the result from the GPU to the RAM. // Note: cudaMemcpy(dst, src, count, kind) cudaMemcpy(a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost); printf("Total number of loops: %d\n", count); int correct = check_a(a);; if(0 == correct) { printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n"); } else { printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n"); } return 1; }
code for sm_80 Function : _Z9mykernel2PiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R18, SR_TID.X ; /* 0x0000000000127919 */ /* 0x000e220000002100 */ /*0020*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff117624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0050*/ S2R R16, SR_CTAID.X ; /* 0x0000000000107919 */ /* 0x000e220000002500 */ /*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*0070*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0080*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0002a20000000a00 */ /*0090*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*00a0*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */ /* 0x000fe20000000a00 */ /*00b0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fc400000e06ff */ /*00c0*/ IMAD R19, R16, c[0x0][0x0], R18 ; /* 0x0000000010137a24 */ /* 0x001fca00078e0212 */ /*00d0*/ STL.128 [R1], R16 ; /* 0x0000001001007387 */ /* 0x0003e40000100c00 */ /*00e0*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x004fe20000000000 */ /*00f0*/ MOV R11, 0x160 ; /* 0x00000160000b7802 */ /* 0x000fe40000000f00 */ /*0100*/ MOV R20, 0xe0 ; /* 0x000000e000147802 */ /* 0x000fe40000000f00 */ /*0110*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x002fe40000000f00 */ /*0130*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*0140*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0150*/ CALL.ABS.NOINC R2 ; /* 
0x0000000002007343 */ /* 0x000fea0003c00000 */ /*0160*/ MOV R2, c[0x0][0x170] ; /* 0x00005c0000027a02 */ /* 0x000fe20000000f00 */ /*0170*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff037624 */ /* 0x000fca00078e00ff */ /*0180*/ LDG.E R0, [R2.64] ; /* 0x0000002402007981 */ /* 0x000ea2000c1e1900 */ /*0190*/ ISETP.GE.AND P0, PT, R19, c[0x0][0x178], PT ; /* 0x00005e0013007a0c */ /* 0x000fe40003f06270 */ /*01a0*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */ /* 0x004fca0007ffe0ff */ /*01b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0001ec000c101924 */ /*01c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*01d0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x001fc800078e00ff */ /*01e0*/ IMAD.WIDE R2, R19, R4, c[0x0][0x168] ; /* 0x00005a0013027625 */ /* 0x001fcc00078e0204 */ /*01f0*/ LDG.E R2, [R2.64] ; /* 0x0000002402027981 */ /* 0x000ea4000c1e1900 */ /*0200*/ IMAD.WIDE R4, R2, R4, c[0x0][0x160] ; /* 0x0000580002047625 */ /* 0x004fca00078e0204 */ /*0210*/ LDG.E R7, [R4.64] ; /* 0x0000002404077981 */ /* 0x000ea2000c1e1900 */ /*0220*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff007624 */ /* 0x000fc800078e00ff */ /*0230*/ IMAD R19, R0, c[0x0][0x0], R19 ; /* 0x0000000000137a24 */ /* 0x000fca00078e0213 */ /*0240*/ ISETP.GE.AND P0, PT, R19, c[0x0][0x178], PT ; /* 0x00005e0013007a0c */ /* 0x000fe40003f06270 */ /*0250*/ IADD3 R7, R2, R7, RZ ; /* 0x0000000702077210 */ /* 0x004fca0007ffe0ff */ /*0260*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001ec000c101924 */ /*0270*/ @!P0 BRA 0x1d0 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*0280*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0290*/ BRA 0x290; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z9mykernel1PiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x001fda0003f06270 */ /*0030*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0040*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x001fe200000001ff */ /*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0060*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0205 */ /*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*0080*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */ /* 0x000fe20007ffe0ff */ /*0090*/ IMAD.WIDE R4, R2, R5, c[0x0][0x160] ; /* 0x0000580002047625 */ /* 0x004fca00078e0205 */ /*00a0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */ /* 0x000ea2000c1e1900 */ /*00b0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe20003f06270 */ /*00c0*/ IMAD.IADD R7, R2, 0x1, R7 ; /* 0x0000000102077824 */ /* 0x004fca00078e0207 */ 
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x0001ee000c101904 */ /*00e0*/ @!P0 BRA 0x40 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <sys/time.h> #define SIZE 102400 #define MOD 102399 #define STEP 128 /* ARRAY A INITIALIZER */ void init_a(int * a) { int i; for(i=0; i<SIZE; i++) { a[i] = 1; } } /* ARRAY B INITIALIZER */ void init_b(int * b) { int i, j; j=0; for(i=0; i<SIZE-1; i++) { b[j] = i; j = (j+STEP)%MOD; } b[SIZE-1] = SIZE-1; } /* CHECKING A VALUES */ int check_a(int * a) { int i; int correct = 1; for(i=0; i<SIZE; i++) { if(a[i] != (i+1)) { correct = 0; } } return correct; } // First version of the function (the original one). __global__ void mykernel1(int * a, int * b, int N) { for(int i = threadIdx.x; i < N; i+=blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } // Second version of the function (question #3). __global__ void mykernel2(int * a, int * b, int *count, int N) { // blockIdx.x: position of the block within the grid. // blockDim.x: dimension of a block (relatively to the direction "x"). // threadIdx.x: position of the thread relatively to the block. printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x); *count = *count + 1; int index = blockIdx.x * blockDim.x + threadIdx.x; for(int i=index; i<N; i+=gridDim.x * blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } #define Q 2 int main(int argc, char * argv[]) { struct timeval start; struct timeval stop; int * a = (int *)malloc(sizeof(int)*SIZE); int * b = (int *)malloc(sizeof(int)*SIZE); int count = 0; init_a(a); init_b(b); /* INSERT CUDA ALLOCATION AND COPY HERE */ int * d_a, * d_b, *d_count; cudaMalloc(&d_a, sizeof(int)*SIZE); cudaMalloc(&d_b, sizeof(int)*SIZE); cudaMalloc(&d_count, sizeof(int)); cudaMemcpy(d_a, a, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice); dim3 nBlocks; dim3 nThperBlock; // We define 1024 threads in only one block. 
// This is not a good solution. if (1 == Q) { nBlocks.x = 1; // number of block in the grid. nThperBlock.x = 1024; // number of threads per bloc } else { nBlocks.x = 16; // number of blocks in the grid. nThperBlock.x = 1024; // number of threads per bloc } // Execute the "kernel" in the GPU. // nBlocks: number of block in the grid // nThperBlock: number of threads per bloc gettimeofday(&start, nullptr); if (1 == Q) { mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE); } else { printf("nBlocks = %d\n", nBlocks.x); printf("nThperBlock = %d\n", nThperBlock.x); mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE); } // The kernel executes asynchronously relatively to the CPU. // That is: (1) the CPU starts the kernel. // (2) the kernel starts its execution. // (3) but, before the kernel stops, the CPU continues its execution! // Thus, we need to synchronize the CPU and the kernel. // The function "cudaDeviceSynchronize" waits for the kernel to finish. cudaDeviceSynchronize(); gettimeofday(&stop, nullptr); printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec); // Copy the result from the GPU to the RAM. // Note: cudaMemcpy(dst, src, count, kind) cudaMemcpy(a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost); printf("Total number of loops: %d\n", count); int correct = check_a(a);; if(0 == correct) { printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n"); } else { printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n"); } return 1; }
.file "tmpxft_0017a494_00000000-6_Ex3.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z6init_aPi .type _Z6init_aPi, @function _Z6init_aPi: .LFB2057: .cfi_startproc endbr64 leaq 409600(%rdi), %rax .L4: movl $1, (%rdi) addq $4, %rdi cmpq %rax, %rdi jne .L4 ret .cfi_endproc .LFE2057: .size _Z6init_aPi, .-_Z6init_aPi .globl _Z6init_bPi .type _Z6init_bPi, @function _Z6init_bPi: .LFB2058: .cfi_startproc endbr64 movl $0, %eax movl $0, %edx .L7: movslq %eax, %rcx movl %edx, (%rdi,%rcx,4) leal 128(%rax), %ecx movslq %ecx, %rax imulq $1374402957, %rax, %rax sarq $47, %rax movl %ecx, %esi sarl $31, %esi subl %esi, %eax imull $102399, %eax, %esi movl %ecx, %eax subl %esi, %eax addl $1, %edx cmpl $102399, %edx jne .L7 movl $102399, 409596(%rdi) ret .cfi_endproc .LFE2058: .size _Z6init_bPi, .-_Z6init_bPi .globl _Z7check_aPi .type _Z7check_aPi, @function _Z7check_aPi: .LFB2059: .cfi_startproc endbr64 movl $1, %eax movl $1, %edx movl $0, %ecx .L11: cmpl %eax, -4(%rdi,%rax,4) cmovne %ecx, %edx addq $1, %rax cmpq $102401, %rax jne .L11 movl %edx, %eax ret .cfi_endproc .LFE2059: .size _Z7check_aPi, .-_Z7check_aPi .globl _Z31__device_stub__Z9mykernel1PiS_iPiS_i .type _Z31__device_stub__Z9mykernel1PiS_iPiS_i, @function _Z31__device_stub__Z9mykernel1PiS_iPiS_i: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 
60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 120(%rsp), %rax subq %fs:40, %rax jne .L19 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9mykernel1PiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z31__device_stub__Z9mykernel1PiS_iPiS_i, .-_Z31__device_stub__Z9mykernel1PiS_iPiS_i .globl _Z9mykernel1PiS_i .type _Z9mykernel1PiS_i, @function _Z9mykernel1PiS_i: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9mykernel1PiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z9mykernel1PiS_i, .-_Z9mykernel1PiS_i .globl _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i .type _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i, @function _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L26 .L22: movq 136(%rsp), %rax subq %fs:40, %rax jne .L27 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9mykernel2PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L22 .L27: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i, .-_Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i .globl _Z9mykernel2PiS_S_i .type _Z9mykernel2PiS_S_i, @function _Z9mykernel2PiS_S_i: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z9mykernel2PiS_S_i, .-_Z9mykernel2PiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "nBlocks = %d\n" .LC1: .string "nThperBlock = %d\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "Execution duration: %ld (s) %ld (us)\n" .section .rodata.str1.1 .LC3: .string "Total number of loops: %d\n" .section .rodata.str1.8 .align 8 .LC4: .string "\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n" .align 8 .LC5: .string "\n\n ******************** \n ***** SUCCESS! 
***** \n ******************** \n\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $120, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movl $409600, %edi call malloc@PLT movq %rax, %rbx movl $409600, %edi call malloc@PLT movq %rax, %rbp movl $0, 12(%rsp) movq %rbx, %rdi call _Z6init_aPi movq %rbp, %rdi call _Z6init_bPi leaq 16(%rsp), %rdi movl $409600, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $409600, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl $1, %ecx movl $409600, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $409600, %edx movq %rbp, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT leaq 12(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) leaq 64(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movl $16, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1024, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $16, 40(%rsp) movl $1024, 52(%rsp) movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L31: call cudaDeviceSynchronize@PLT leaq 80(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 88(%rsp), %rcx subq 72(%rsp), %rcx movq 80(%rsp), %rdx subq 64(%rsp), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movl $409600, %edx movq 16(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT leaq 12(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 32(%rsp), %rsi call cudaMemcpy@PLT movl 12(%rsp), %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rbx, %rdi call 
_Z7check_aPi testl %eax, %eax jne .L32 leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L33: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 movl $1, %eax addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movl $102400, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i jmp .L31 .L32: leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L33 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC6: .string "_Z9mykernel2PiS_S_i" .LC7: .string "_Z9mykernel1PiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z9mykernel2PiS_S_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z9mykernel1PiS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv 
.section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <sys/time.h> #define SIZE 102400 #define MOD 102399 #define STEP 128 /* ARRAY A INITIALIZER */ void init_a(int * a) { int i; for(i=0; i<SIZE; i++) { a[i] = 1; } } /* ARRAY B INITIALIZER */ void init_b(int * b) { int i, j; j=0; for(i=0; i<SIZE-1; i++) { b[j] = i; j = (j+STEP)%MOD; } b[SIZE-1] = SIZE-1; } /* CHECKING A VALUES */ int check_a(int * a) { int i; int correct = 1; for(i=0; i<SIZE; i++) { if(a[i] != (i+1)) { correct = 0; } } return correct; } // First version of the function (the original one). __global__ void mykernel1(int * a, int * b, int N) { for(int i = threadIdx.x; i < N; i+=blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } // Second version of the function (question #3). __global__ void mykernel2(int * a, int * b, int *count, int N) { // blockIdx.x: position of the block within the grid. // blockDim.x: dimension of a block (relatively to the direction "x"). // threadIdx.x: position of the thread relatively to the block. printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x); *count = *count + 1; int index = blockIdx.x * blockDim.x + threadIdx.x; for(int i=index; i<N; i+=gridDim.x * blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } #define Q 2 int main(int argc, char * argv[]) { struct timeval start; struct timeval stop; int * a = (int *)malloc(sizeof(int)*SIZE); int * b = (int *)malloc(sizeof(int)*SIZE); int count = 0; init_a(a); init_b(b); /* INSERT CUDA ALLOCATION AND COPY HERE */ int * d_a, * d_b, *d_count; cudaMalloc(&d_a, sizeof(int)*SIZE); cudaMalloc(&d_b, sizeof(int)*SIZE); cudaMalloc(&d_count, sizeof(int)); cudaMemcpy(d_a, a, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(int)*SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice); dim3 nBlocks; dim3 nThperBlock; // We define 1024 threads in only one block. 
// This is not a good solution. if (1 == Q) { nBlocks.x = 1; // number of block in the grid. nThperBlock.x = 1024; // number of threads per bloc } else { nBlocks.x = 16; // number of blocks in the grid. nThperBlock.x = 1024; // number of threads per bloc } // Execute the "kernel" in the GPU. // nBlocks: number of block in the grid // nThperBlock: number of threads per bloc gettimeofday(&start, nullptr); if (1 == Q) { mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE); } else { printf("nBlocks = %d\n", nBlocks.x); printf("nThperBlock = %d\n", nThperBlock.x); mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE); } // The kernel executes asynchronously relatively to the CPU. // That is: (1) the CPU starts the kernel. // (2) the kernel starts its execution. // (3) but, before the kernel stops, the CPU continues its execution! // Thus, we need to synchronize the CPU and the kernel. // The function "cudaDeviceSynchronize" waits for the kernel to finish. cudaDeviceSynchronize(); gettimeofday(&stop, nullptr); printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec); // Copy the result from the GPU to the RAM. // Note: cudaMemcpy(dst, src, count, kind) cudaMemcpy(a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost); printf("Total number of loops: %d\n", count); int correct = check_a(a);; if(0 == correct) { printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n"); } else { printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n"); } return 1; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <sys/time.h> #define SIZE 102400 #define MOD 102399 #define STEP 128 /* ARRAY A INITIALIZER */ void init_a(int * a) { int i; for(i=0; i<SIZE; i++) { a[i] = 1; } } /* ARRAY B INITIALIZER */ void init_b(int * b) { int i, j; j=0; for(i=0; i<SIZE-1; i++) { b[j] = i; j = (j+STEP)%MOD; } b[SIZE-1] = SIZE-1; } /* CHECKING A VALUES */ int check_a(int * a) { int i; int correct = 1; for(i=0; i<SIZE; i++) { if(a[i] != (i+1)) { correct = 0; } } return correct; } // First version of the function (the original one). __global__ void mykernel1(int * a, int * b, int N) { for(int i = threadIdx.x; i < N; i+=blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } // Second version of the function (question #3). __global__ void mykernel2(int * a, int * b, int *count, int N) { // blockIdx.x: position of the block within the grid. // blockDim.x: dimension of a block (relatively to the direction "x"). // threadIdx.x: position of the thread relatively to the block. printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x); *count = *count + 1; int index = blockIdx.x * blockDim.x + threadIdx.x; for(int i=index; i<N; i+=gridDim.x * blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } #define Q 2 int main(int argc, char * argv[]) { struct timeval start; struct timeval stop; int * a = (int *)malloc(sizeof(int)*SIZE); int * b = (int *)malloc(sizeof(int)*SIZE); int count = 0; init_a(a); init_b(b); /* INSERT CUDA ALLOCATION AND COPY HERE */ int * d_a, * d_b, *d_count; hipMalloc(&d_a, sizeof(int)*SIZE); hipMalloc(&d_b, sizeof(int)*SIZE); hipMalloc(&d_count, sizeof(int)); hipMemcpy(d_a, a, sizeof(int)*SIZE, hipMemcpyHostToDevice); hipMemcpy(d_b, b, sizeof(int)*SIZE, hipMemcpyHostToDevice); hipMemcpy(d_count, &count, sizeof(int), hipMemcpyHostToDevice); dim3 nBlocks; dim3 nThperBlock; // We define 1024 threads in only one block. 
// This is not a good solution. if (1 == Q) { nBlocks.x = 1; // number of block in the grid. nThperBlock.x = 1024; // number of threads per bloc } else { nBlocks.x = 16; // number of blocks in the grid. nThperBlock.x = 1024; // number of threads per bloc } // Execute the "kernel" in the GPU. // nBlocks: number of block in the grid // nThperBlock: number of threads per bloc gettimeofday(&start, nullptr); if (1 == Q) { mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE); } else { printf("nBlocks = %d\n", nBlocks.x); printf("nThperBlock = %d\n", nThperBlock.x); mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE); } // The kernel executes asynchronously relatively to the CPU. // That is: (1) the CPU starts the kernel. // (2) the kernel starts its execution. // (3) but, before the kernel stops, the CPU continues its execution! // Thus, we need to synchronize the CPU and the kernel. // The function "cudaDeviceSynchronize" waits for the kernel to finish. hipDeviceSynchronize(); gettimeofday(&stop, nullptr); printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec); // Copy the result from the GPU to the RAM. // Note: cudaMemcpy(dst, src, count, kind) hipMemcpy(a, d_a, sizeof(int)*SIZE, hipMemcpyDeviceToHost); hipMemcpy(&count, d_count, sizeof(int), hipMemcpyDeviceToHost); printf("Total number of loops: %d\n", count); int correct = check_a(a);; if(0 == correct) { printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n"); } else { printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n"); } return 1; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <sys/time.h> #define SIZE 102400 #define MOD 102399 #define STEP 128 /* ARRAY A INITIALIZER */ void init_a(int * a) { int i; for(i=0; i<SIZE; i++) { a[i] = 1; } } /* ARRAY B INITIALIZER */ void init_b(int * b) { int i, j; j=0; for(i=0; i<SIZE-1; i++) { b[j] = i; j = (j+STEP)%MOD; } b[SIZE-1] = SIZE-1; } /* CHECKING A VALUES */ int check_a(int * a) { int i; int correct = 1; for(i=0; i<SIZE; i++) { if(a[i] != (i+1)) { correct = 0; } } return correct; } // First version of the function (the original one). __global__ void mykernel1(int * a, int * b, int N) { for(int i = threadIdx.x; i < N; i+=blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } // Second version of the function (question #3). __global__ void mykernel2(int * a, int * b, int *count, int N) { // blockIdx.x: position of the block within the grid. // blockDim.x: dimension of a block (relatively to the direction "x"). // threadIdx.x: position of the thread relatively to the block. printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x); *count = *count + 1; int index = blockIdx.x * blockDim.x + threadIdx.x; for(int i=index; i<N; i+=gridDim.x * blockDim.x) { int v = b[i]; a[v] = a[v] + v; } } #define Q 2 int main(int argc, char * argv[]) { struct timeval start; struct timeval stop; int * a = (int *)malloc(sizeof(int)*SIZE); int * b = (int *)malloc(sizeof(int)*SIZE); int count = 0; init_a(a); init_b(b); /* INSERT CUDA ALLOCATION AND COPY HERE */ int * d_a, * d_b, *d_count; hipMalloc(&d_a, sizeof(int)*SIZE); hipMalloc(&d_b, sizeof(int)*SIZE); hipMalloc(&d_count, sizeof(int)); hipMemcpy(d_a, a, sizeof(int)*SIZE, hipMemcpyHostToDevice); hipMemcpy(d_b, b, sizeof(int)*SIZE, hipMemcpyHostToDevice); hipMemcpy(d_count, &count, sizeof(int), hipMemcpyHostToDevice); dim3 nBlocks; dim3 nThperBlock; // We define 1024 threads in only one block. 
// This is not a good solution. if (1 == Q) { nBlocks.x = 1; // number of block in the grid. nThperBlock.x = 1024; // number of threads per bloc } else { nBlocks.x = 16; // number of blocks in the grid. nThperBlock.x = 1024; // number of threads per bloc } // Execute the "kernel" in the GPU. // nBlocks: number of block in the grid // nThperBlock: number of threads per bloc gettimeofday(&start, nullptr); if (1 == Q) { mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE); } else { printf("nBlocks = %d\n", nBlocks.x); printf("nThperBlock = %d\n", nThperBlock.x); mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE); } // The kernel executes asynchronously relatively to the CPU. // That is: (1) the CPU starts the kernel. // (2) the kernel starts its execution. // (3) but, before the kernel stops, the CPU continues its execution! // Thus, we need to synchronize the CPU and the kernel. // The function "cudaDeviceSynchronize" waits for the kernel to finish. hipDeviceSynchronize(); gettimeofday(&stop, nullptr); printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec); // Copy the result from the GPU to the RAM. // Note: cudaMemcpy(dst, src, count, kind) hipMemcpy(a, d_a, sizeof(int)*SIZE, hipMemcpyDeviceToHost); hipMemcpy(&count, d_count, sizeof(int), hipMemcpyDeviceToHost); printf("Total number of loops: %d\n", count); int correct = check_a(a);; if(0 == correct) { printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n"); } else { printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n"); } return 1; }
.text .file "Ex3.hip" .globl _Z6init_aPi # -- Begin function _Z6init_aPi .p2align 4, 0x90 .type _Z6init_aPi,@function _Z6init_aPi: # @_Z6init_aPi .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # =>This Inner Loop Header: Depth=1 movl $1, (%rdi,%rax,4) incq %rax cmpq $102400, %rax # imm = 0x19000 jne .LBB0_1 # %bb.2: retq .Lfunc_end0: .size _Z6init_aPi, .Lfunc_end0-_Z6init_aPi .cfi_endproc # -- End function .globl _Z6init_bPi # -- Begin function _Z6init_bPi .p2align 4, 0x90 .type _Z6init_bPi,@function _Z6init_bPi: # @_Z6init_bPi .cfi_startproc # %bb.0: xorl %ecx, %ecx xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movslq %ecx, %rcx movl %eax, (%rdi,%rcx,4) movl %ecx, %edx subl $-128, %edx movslq %edx, %rdx imulq $1374402957, %rdx, %rdx # imm = 0x51EBB98D movq %rdx, %rsi shrq $63, %rsi sarq $47, %rdx addl %esi, %edx imull $102399, %edx, %edx # imm = 0x18FFF negl %edx addl %edx, %ecx addl $128, %ecx incl %eax cmpl $102399, %eax # imm = 0x18FFF jne .LBB1_1 # %bb.2: movl $102399, 409596(%rdi) # imm = 0x18FFF retq .Lfunc_end1: .size _Z6init_bPi, .Lfunc_end1-_Z6init_bPi .cfi_endproc # -- End function .globl _Z7check_aPi # -- Begin function _Z7check_aPi .p2align 4, 0x90 .type _Z7check_aPi,@function _Z7check_aPi: # @_Z7check_aPi .cfi_startproc # %bb.0: movl $1, %eax movl $1, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl -4(%rdi,%rcx,4), %esi cmpq %rsi, %rcx cmovnel %edx, %eax incq %rcx cmpq $102401, %rcx # imm = 0x19001 jne .LBB2_1 # %bb.2: retq .Lfunc_end2: .size _Z7check_aPi, .Lfunc_end2-_Z7check_aPi .cfi_endproc # -- End function .globl _Z24__device_stub__mykernel1PiS_i # -- Begin function _Z24__device_stub__mykernel1PiS_i .p2align 4, 0x90 .type _Z24__device_stub__mykernel1PiS_i,@function _Z24__device_stub__mykernel1PiS_i: # @_Z24__device_stub__mykernel1PiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) 
leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9mykernel1PiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z24__device_stub__mykernel1PiS_i, .Lfunc_end3-_Z24__device_stub__mykernel1PiS_i .cfi_endproc # -- End function .globl _Z24__device_stub__mykernel2PiS_S_i # -- Begin function _Z24__device_stub__mykernel2PiS_S_i .p2align 4, 0x90 .type _Z24__device_stub__mykernel2PiS_S_i,@function _Z24__device_stub__mykernel2PiS_S_i: # @_Z24__device_stub__mykernel2PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9mykernel2PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z24__device_stub__mykernel2PiS_S_i, .Lfunc_end4-_Z24__device_stub__mykernel2PiS_S_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $176, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -32 
.cfi_offset %r14, -24 .cfi_offset %rbp, -16 movl $409600, %edi # imm = 0x64000 callq malloc movq %rax, %rbx movl $409600, %edi # imm = 0x64000 callq malloc movq %rax, %r14 movl $0, 12(%rsp) xorl %eax, %eax .p2align 4, 0x90 .LBB5_1: # =>This Inner Loop Header: Depth=1 movl $1, (%rbx,%rax,4) incq %rax cmpq $102400, %rax # imm = 0x19000 jne .LBB5_1 # %bb.2: # %_Z6init_aPi.exit.preheader xorl %ecx, %ecx xorl %eax, %eax .p2align 4, 0x90 .LBB5_3: # %_Z6init_aPi.exit # =>This Inner Loop Header: Depth=1 movslq %ecx, %rcx movl %eax, (%r14,%rcx,4) movl %ecx, %edx subl $-128, %edx movslq %edx, %rdx imulq $1374402957, %rdx, %rdx # imm = 0x51EBB98D movq %rdx, %rsi shrq $63, %rsi sarq $47, %rdx addl %esi, %edx imull $102399, %edx, %edx # imm = 0x18FFF negl %edx addl %edx, %ecx addl $128, %ecx incl %eax cmpl $102399, %eax # imm = 0x18FFF jne .LBB5_3 # %bb.4: # %_Z6init_bPi.exit movl $102399, 409596(%r14) # imm = 0x18FFF leaq 24(%rsp), %rdi movl $409600, %esi # imm = 0x64000 callq hipMalloc leaq 40(%rsp), %rdi movl $409600, %esi # imm = 0x64000 callq hipMalloc leaq 16(%rsp), %rdi movl $4, %esi callq hipMalloc movq 24(%rsp), %rdi movl $1, %ebp movl $409600, %edx # imm = 0x64000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movl $409600, %edx # imm = 0x64000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi leaq 12(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy xorl %r14d, %r14d leaq 160(%rsp), %rdi xorl %esi, %esi callq gettimeofday movl $.L.str, %edi movl $16, %esi xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $1024, %esi # imm = 0x400 xorl %eax, %eax callq printf movabsq $4294967312, %rdi # imm = 0x100000010 leaq 1008(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_6 # %bb.5: movq 24(%rsp), %rax movq 40(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) movl $102400, 36(%rsp) # imm = 0x19000 leaq 
152(%rsp), %rax movq %rax, 48(%rsp) leaq 144(%rsp), %rax movq %rax, 56(%rsp) leaq 136(%rsp), %rax movq %rax, 64(%rsp) leaq 36(%rsp), %rax movq %rax, 72(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9mykernel2PiS_S_i, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_6: callq hipDeviceSynchronize leaq 48(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 48(%rsp), %rsi movq 56(%rsp), %rdx subq 160(%rsp), %rsi subq 168(%rsp), %rdx movl $.L.str.2, %edi xorl %eax, %eax callq printf movq 24(%rsp), %rsi movl $409600, %edx # imm = 0x64000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 16(%rsp), %rsi leaq 12(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movl 12(%rsp), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf movl $1, %eax .p2align 4, 0x90 .LBB5_7: # =>This Inner Loop Header: Depth=1 movl -4(%rbx,%rax,4), %ecx cmpq %rcx, %rax cmovnel %r14d, %ebp incq %rax cmpq $102401, %rax # imm = 0x19001 jne .LBB5_7 # %bb.8: # %_Z7check_aPi.exit testl %ebp, %ebp movl $.Lstr.1, %eax movl $.Lstr, %edi cmoveq %rax, %rdi callq puts@PLT movl $1, %eax addq $176, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq 
__hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9mykernel1PiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9mykernel2PiS_S_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z9mykernel1PiS_i,@object # @_Z9mykernel1PiS_i .section .rodata,"a",@progbits .globl _Z9mykernel1PiS_i .p2align 3, 0x0 _Z9mykernel1PiS_i: .quad _Z24__device_stub__mykernel1PiS_i .size _Z9mykernel1PiS_i, 8 .type _Z9mykernel2PiS_S_i,@object # @_Z9mykernel2PiS_S_i .globl _Z9mykernel2PiS_S_i .p2align 3, 0x0 _Z9mykernel2PiS_S_i: .quad _Z24__device_stub__mykernel2PiS_S_i .size _Z9mykernel2PiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "nBlocks = %d\n" .size .L.str, 14 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "nThperBlock = %d\n" .size .L.str.1, 18 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Execution duration: %ld (s) %ld (us)\n" .size .L.str.2, 38 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Total number of loops: %d\n" .size .L.str.3, 27 .type 
.L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9mykernel1PiS_i" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z9mykernel2PiS_S_i" .size .L__unnamed_2, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n" .size .Lstr, 72 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n" .size .Lstr.1, 72 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__mykernel1PiS_i .addrsig_sym _Z24__device_stub__mykernel2PiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9mykernel1PiS_i .addrsig_sym _Z9mykernel2PiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0017a494_00000000-6_Ex3.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z6init_aPi .type _Z6init_aPi, @function _Z6init_aPi: .LFB2057: .cfi_startproc endbr64 leaq 409600(%rdi), %rax .L4: movl $1, (%rdi) addq $4, %rdi cmpq %rax, %rdi jne .L4 ret .cfi_endproc .LFE2057: .size _Z6init_aPi, .-_Z6init_aPi .globl _Z6init_bPi .type _Z6init_bPi, @function _Z6init_bPi: .LFB2058: .cfi_startproc endbr64 movl $0, %eax movl $0, %edx .L7: movslq %eax, %rcx movl %edx, (%rdi,%rcx,4) leal 128(%rax), %ecx movslq %ecx, %rax imulq $1374402957, %rax, %rax sarq $47, %rax movl %ecx, %esi sarl $31, %esi subl %esi, %eax imull $102399, %eax, %esi movl %ecx, %eax subl %esi, %eax addl $1, %edx cmpl $102399, %edx jne .L7 movl $102399, 409596(%rdi) ret .cfi_endproc .LFE2058: .size _Z6init_bPi, .-_Z6init_bPi .globl _Z7check_aPi .type _Z7check_aPi, @function _Z7check_aPi: .LFB2059: .cfi_startproc endbr64 movl $1, %eax movl $1, %edx movl $0, %ecx .L11: cmpl %eax, -4(%rdi,%rax,4) cmovne %ecx, %edx addq $1, %rax cmpq $102401, %rax jne .L11 movl %edx, %eax ret .cfi_endproc .LFE2059: .size _Z7check_aPi, .-_Z7check_aPi .globl _Z31__device_stub__Z9mykernel1PiS_iPiS_i .type _Z31__device_stub__Z9mykernel1PiS_iPiS_i, @function _Z31__device_stub__Z9mykernel1PiS_iPiS_i: .LFB2085: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 
60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L18 .L14: movq 120(%rsp), %rax subq %fs:40, %rax jne .L19 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9mykernel1PiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L14 .L19: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z31__device_stub__Z9mykernel1PiS_iPiS_i, .-_Z31__device_stub__Z9mykernel1PiS_iPiS_i .globl _Z9mykernel1PiS_i .type _Z9mykernel1PiS_i, @function _Z9mykernel1PiS_i: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9mykernel1PiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z9mykernel1PiS_i, .-_Z9mykernel1PiS_i .globl _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i .type _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i, @function _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L26 .L22: movq 136(%rsp), %rax subq %fs:40, %rax jne .L27 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L26: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9mykernel2PiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L22 .L27: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i, .-_Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i .globl _Z9mykernel2PiS_S_i .type _Z9mykernel2PiS_S_i, @function _Z9mykernel2PiS_S_i: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z9mykernel2PiS_S_i, .-_Z9mykernel2PiS_S_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "nBlocks = %d\n" .LC1: .string "nThperBlock = %d\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "Execution duration: %ld (s) %ld (us)\n" .section .rodata.str1.1 .LC3: .string "Total number of loops: %d\n" .section .rodata.str1.8 .align 8 .LC4: .string "\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n" .align 8 .LC5: .string "\n\n ******************** \n ***** SUCCESS! 
***** \n ******************** \n\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $120, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movl $409600, %edi call malloc@PLT movq %rax, %rbx movl $409600, %edi call malloc@PLT movq %rax, %rbp movl $0, 12(%rsp) movq %rbx, %rdi call _Z6init_aPi movq %rbp, %rdi call _Z6init_bPi leaq 16(%rsp), %rdi movl $409600, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $409600, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl $1, %ecx movl $409600, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $409600, %edx movq %rbp, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT leaq 12(%rsp), %rsi movl $1, %ecx movl $4, %edx movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) leaq 64(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movl $16, %edx leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1024, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $16, 40(%rsp) movl $1024, 52(%rsp) movl 60(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 52(%rsp), %rdx movq 40(%rsp), %rdi movl 48(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L31: call cudaDeviceSynchronize@PLT leaq 80(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 88(%rsp), %rcx subq 72(%rsp), %rcx movq 80(%rsp), %rdx subq 64(%rsp), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $2, %ecx movl $409600, %edx movq 16(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT leaq 12(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 32(%rsp), %rsi call cudaMemcpy@PLT movl 12(%rsp), %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rbx, %rdi call 
_Z7check_aPi testl %eax, %eax jne .L32 leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L33: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 movl $1, %eax addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state movl $102400, %ecx movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z33__device_stub__Z9mykernel2PiS_S_iPiS_S_i jmp .L31 .L32: leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L33 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC6: .string "_Z9mykernel2PiS_S_i" .LC7: .string "_Z9mykernel1PiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z9mykernel2PiS_S_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z9mykernel1PiS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv 
.section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Ex3.hip" .globl _Z6init_aPi # -- Begin function _Z6init_aPi .p2align 4, 0x90 .type _Z6init_aPi,@function _Z6init_aPi: # @_Z6init_aPi .cfi_startproc # %bb.0: xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # =>This Inner Loop Header: Depth=1 movl $1, (%rdi,%rax,4) incq %rax cmpq $102400, %rax # imm = 0x19000 jne .LBB0_1 # %bb.2: retq .Lfunc_end0: .size _Z6init_aPi, .Lfunc_end0-_Z6init_aPi .cfi_endproc # -- End function .globl _Z6init_bPi # -- Begin function _Z6init_bPi .p2align 4, 0x90 .type _Z6init_bPi,@function _Z6init_bPi: # @_Z6init_bPi .cfi_startproc # %bb.0: xorl %ecx, %ecx xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 movslq %ecx, %rcx movl %eax, (%rdi,%rcx,4) movl %ecx, %edx subl $-128, %edx movslq %edx, %rdx imulq $1374402957, %rdx, %rdx # imm = 0x51EBB98D movq %rdx, %rsi shrq $63, %rsi sarq $47, %rdx addl %esi, %edx imull $102399, %edx, %edx # imm = 0x18FFF negl %edx addl %edx, %ecx addl $128, %ecx incl %eax cmpl $102399, %eax # imm = 0x18FFF jne .LBB1_1 # %bb.2: movl $102399, 409596(%rdi) # imm = 0x18FFF retq .Lfunc_end1: .size _Z6init_bPi, .Lfunc_end1-_Z6init_bPi .cfi_endproc # -- End function .globl _Z7check_aPi # -- Begin function _Z7check_aPi .p2align 4, 0x90 .type _Z7check_aPi,@function _Z7check_aPi: # @_Z7check_aPi .cfi_startproc # %bb.0: movl $1, %eax movl $1, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 movl -4(%rdi,%rcx,4), %esi cmpq %rsi, %rcx cmovnel %edx, %eax incq %rcx cmpq $102401, %rcx # imm = 0x19001 jne .LBB2_1 # %bb.2: retq .Lfunc_end2: .size _Z7check_aPi, .Lfunc_end2-_Z7check_aPi .cfi_endproc # -- End function .globl _Z24__device_stub__mykernel1PiS_i # -- Begin function _Z24__device_stub__mykernel1PiS_i .p2align 4, 0x90 .type _Z24__device_stub__mykernel1PiS_i,@function _Z24__device_stub__mykernel1PiS_i: # @_Z24__device_stub__mykernel1PiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) 
leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9mykernel1PiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z24__device_stub__mykernel1PiS_i, .Lfunc_end3-_Z24__device_stub__mykernel1PiS_i .cfi_endproc # -- End function .globl _Z24__device_stub__mykernel2PiS_S_i # -- Begin function _Z24__device_stub__mykernel2PiS_S_i .p2align 4, 0x90 .type _Z24__device_stub__mykernel2PiS_S_i,@function _Z24__device_stub__mykernel2PiS_S_i: # @_Z24__device_stub__mykernel2PiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9mykernel2PiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z24__device_stub__mykernel2PiS_S_i, .Lfunc_end4-_Z24__device_stub__mykernel2PiS_S_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $176, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -32 
.cfi_offset %r14, -24 .cfi_offset %rbp, -16 movl $409600, %edi # imm = 0x64000 callq malloc movq %rax, %rbx movl $409600, %edi # imm = 0x64000 callq malloc movq %rax, %r14 movl $0, 12(%rsp) xorl %eax, %eax .p2align 4, 0x90 .LBB5_1: # =>This Inner Loop Header: Depth=1 movl $1, (%rbx,%rax,4) incq %rax cmpq $102400, %rax # imm = 0x19000 jne .LBB5_1 # %bb.2: # %_Z6init_aPi.exit.preheader xorl %ecx, %ecx xorl %eax, %eax .p2align 4, 0x90 .LBB5_3: # %_Z6init_aPi.exit # =>This Inner Loop Header: Depth=1 movslq %ecx, %rcx movl %eax, (%r14,%rcx,4) movl %ecx, %edx subl $-128, %edx movslq %edx, %rdx imulq $1374402957, %rdx, %rdx # imm = 0x51EBB98D movq %rdx, %rsi shrq $63, %rsi sarq $47, %rdx addl %esi, %edx imull $102399, %edx, %edx # imm = 0x18FFF negl %edx addl %edx, %ecx addl $128, %ecx incl %eax cmpl $102399, %eax # imm = 0x18FFF jne .LBB5_3 # %bb.4: # %_Z6init_bPi.exit movl $102399, 409596(%r14) # imm = 0x18FFF leaq 24(%rsp), %rdi movl $409600, %esi # imm = 0x64000 callq hipMalloc leaq 40(%rsp), %rdi movl $409600, %esi # imm = 0x64000 callq hipMalloc leaq 16(%rsp), %rdi movl $4, %esi callq hipMalloc movq 24(%rsp), %rdi movl $1, %ebp movl $409600, %edx # imm = 0x64000 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movl $409600, %edx # imm = 0x64000 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi leaq 12(%rsp), %rsi movl $4, %edx movl $1, %ecx callq hipMemcpy xorl %r14d, %r14d leaq 160(%rsp), %rdi xorl %esi, %esi callq gettimeofday movl $.L.str, %edi movl $16, %esi xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $1024, %esi # imm = 0x400 xorl %eax, %eax callq printf movabsq $4294967312, %rdi # imm = 0x100000010 leaq 1008(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_6 # %bb.5: movq 24(%rsp), %rax movq 40(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) movl $102400, 36(%rsp) # imm = 0x19000 leaq 
152(%rsp), %rax movq %rax, 48(%rsp) leaq 144(%rsp), %rax movq %rax, 56(%rsp) leaq 136(%rsp), %rax movq %rax, 64(%rsp) leaq 36(%rsp), %rax movq %rax, 72(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9mykernel2PiS_S_i, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_6: callq hipDeviceSynchronize leaq 48(%rsp), %rdi xorl %esi, %esi callq gettimeofday movq 48(%rsp), %rsi movq 56(%rsp), %rdx subq 160(%rsp), %rsi subq 168(%rsp), %rdx movl $.L.str.2, %edi xorl %eax, %eax callq printf movq 24(%rsp), %rsi movl $409600, %edx # imm = 0x64000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 16(%rsp), %rsi leaq 12(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movl 12(%rsp), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf movl $1, %eax .p2align 4, 0x90 .LBB5_7: # =>This Inner Loop Header: Depth=1 movl -4(%rbx,%rax,4), %ecx cmpq %rcx, %rax cmovnel %r14d, %ebp incq %rax cmpq $102401, %rax # imm = 0x19001 jne .LBB5_7 # %bb.8: # %_Z7check_aPi.exit testl %ebp, %ebp movl $.Lstr.1, %eax movl $.Lstr, %edi cmoveq %rax, %rdi callq puts@PLT movl $1, %eax addq $176, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq 
__hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9mykernel1PiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9mykernel2PiS_S_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z9mykernel1PiS_i,@object # @_Z9mykernel1PiS_i .section .rodata,"a",@progbits .globl _Z9mykernel1PiS_i .p2align 3, 0x0 _Z9mykernel1PiS_i: .quad _Z24__device_stub__mykernel1PiS_i .size _Z9mykernel1PiS_i, 8 .type _Z9mykernel2PiS_S_i,@object # @_Z9mykernel2PiS_S_i .globl _Z9mykernel2PiS_S_i .p2align 3, 0x0 _Z9mykernel2PiS_S_i: .quad _Z24__device_stub__mykernel2PiS_S_i .size _Z9mykernel2PiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "nBlocks = %d\n" .size .L.str, 14 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "nThperBlock = %d\n" .size .L.str.1, 18 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Execution duration: %ld (s) %ld (us)\n" .size .L.str.2, 38 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Total number of loops: %d\n" .size .L.str.3, 27 .type 
.L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9mykernel1PiS_i" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z9mykernel2PiS_S_i" .size .L__unnamed_2, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n" .size .Lstr, 72 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n" .size .Lstr.1, 72 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__mykernel1PiS_i .addrsig_sym _Z24__device_stub__mykernel2PiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9mykernel1PiS_i .addrsig_sym _Z9mykernel2PiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_